Index: VERSION ================================================================== --- VERSION +++ VERSION @@ -1,1 +1,1 @@ -1.37 +2.0 Index: src/attach.c ================================================================== --- src/attach.c +++ src/attach.c @@ -2,11 +2,11 @@ ** Copyright (c) 2010 D. Richard Hipp ** ** This program is free software; you can redistribute it and/or ** modify it under the terms of the Simplified BSD License (also ** known as the "2-Clause License" or "FreeBSD License".) - +** ** This program is distributed in the hope that it will be useful, ** but without any warranty; without even the implied warranty of ** merchantability or fitness for a particular purpose. ** ** Author contact information: Index: src/blob.c ================================================================== --- src/blob.c +++ src/blob.c @@ -2,11 +2,11 @@ ** Copyright (c) 2006 D. Richard Hipp ** ** This program is free software; you can redistribute it and/or ** modify it under the terms of the Simplified BSD License (also ** known as the "2-Clause License" or "FreeBSD License".) - +** ** This program is distributed in the hope that it will be useful, ** but without any warranty; without even the implied warranty of ** merchantability or fitness for a particular purpose. ** ** Author contact information: @@ -647,15 +647,19 @@ } pFrom->iCursor = i; } /* -** Return true if the blob contains a valid UUID_SIZE-digit base16 identifier. +** Return true if the blob contains a valid base16 identifier artifact hash. +** +** The value returned is actually one of HNAME_SHA1 OR HNAME_K256 if the +** hash is valid. Both of these are non-zero and therefore "true". +** If the hash is not valid, then HNAME_ERROR is returned, which is zero or +** false. */ -int blob_is_uuid(Blob *pBlob){ - return blob_size(pBlob)==UUID_SIZE - && validate16(blob_buffer(pBlob), UUID_SIZE); +int blob_is_hname(Blob *pBlob){ + return hname_validate(blob_buffer(pBlob), blob_size(pBlob)); } /* ** Return true if the blob contains a valid filename */ Index: src/browse.c ================================================================== --- src/browse.c +++ src/browse.c @@ -315,11 +315,11 @@ FileTreeNode *pSibling; /* Next element in the same subdirectory */ FileTreeNode *pChild; /* List of child nodes */ FileTreeNode *pLastChild; /* Last child on the pChild list */ char *zName; /* Name of this entry. The "tail" */ char *zFullName; /* Full pathname of this entry */ - char *zUuid; /* SHA1 hash of this file. May be NULL. */ + char *zUuid; /* Artifact hash of this file. May be NULL. */ double mtime; /* Modification time for this entry */ unsigned nFullName; /* Length of zFullName */ unsigned iLevel; /* Levels of parent directories */ }; @@ -345,11 +345,11 @@ ** the tree to be constructed properly. */ static void tree_add_node( FileTree *pTree, /* Tree into which nodes are added */ const char *zPath, /* The full pathname of file to add */ - const char *zUuid, /* UUID of the file. Might be NULL. */ + const char *zUuid, /* Hash of the file. Might be NULL. 
*/ double mtime /* Modification time for this entry */ ){ int i; FileTreeNode *pParent; /* Parent (directory) of the next node to insert */ @@ -368,20 +368,20 @@ FileTreeNode *pNew; int iStart = i; int nByte; while( zPath[i] && zPath[i]!='/' ){ i++; } nByte = sizeof(*pNew) + i + 1; - if( zUuid!=0 && zPath[i]==0 ) nByte += UUID_SIZE+1; + if( zUuid!=0 && zPath[i]==0 ) nByte += HNAME_MAX+1; pNew = fossil_malloc( nByte ); memset(pNew, 0, sizeof(*pNew)); pNew->zFullName = (char*)&pNew[1]; memcpy(pNew->zFullName, zPath, i); pNew->zFullName[i] = 0; pNew->nFullName = i; if( zUuid!=0 && zPath[i]==0 ){ pNew->zUuid = pNew->zFullName + i + 1; - memcpy(pNew->zUuid, zUuid, UUID_SIZE+1); + memcpy(pNew->zUuid, zUuid, strlen(zUuid)+1); } pNew->zName = pNew->zFullName + iStart; if( pTree->pLast ){ pTree->pLast->pNext = pNew; }else{ Index: src/bundle.c ================================================================== --- src/bundle.c +++ src/bundle.c @@ -35,11 +35,11 @@ @ bcname TEXT, @ bcvalue ANY @ ); @ CREATE TABLE IF NOT EXISTS "%w".bblob( @ blobid INTEGER PRIMARY KEY, -- Blob ID -@ uuid TEXT NOT NULL, -- SHA1 hash of expanded blob +@ uuid TEXT NOT NULL, -- hash of expanded blob @ sz INT NOT NULL, -- Size of blob after expansion @ delta ANY, -- Delta compression basis, or NULL @ notes TEXT, -- Description of content @ data BLOB -- compressed content @ ); @@ -439,19 +439,19 @@ " WHERE bix.delta=%d" " AND bix.blobid=bblob.blobid;", iSrc ); while( db_step(&q)==SQLITE_ROW ){ - Blob h1, h2, c1, c2; + Blob h1, c1, c2; int rid; blob_zero(&h1); db_column_blob(&q, 0, &h1); blob_zero(&c1); db_column_blob(&q, 1, &c1); blob_uncompress(&c1, &c1); blob_zero(&c2); - if( db_column_type(&q,2)==SQLITE_TEXT && db_column_bytes(&q,2)==40 ){ + if( db_column_type(&q,2)==SQLITE_TEXT && db_column_bytes(&q,2)>=HNAME_MIN ){ Blob basis; rid = db_int(0,"SELECT rid FROM blob WHERE uuid=%Q", db_column_text(&q,2)); content_get(rid, &basis); blob_delta_apply(&basis, &c1, &c2); @@ -461,16 +461,13 @@ blob_delta_apply(pBasis, &c1, &c2); blob_reset(&c1); }else{ c2 = c1; } - sha1sum_blob(&c2, &h2); - if( blob_compare(&h1, &h2)!=0 ){ - fossil_fatal("SHA1 hash mismatch - wanted %s, got %s", - blob_str(&h1), blob_str(&h2)); + if( hname_verify_hash(&c2, blob_buffer(&h1), blob_size(&h1))==0 ){ + fossil_fatal("artifact hash error on %b", &h1); } - blob_reset(&h2); rid = content_put_ex(&c2, blob_str(&h1), 0, 0, isPriv); if( rid==0 ){ fossil_fatal("%s", g.zErrMsg); }else{ if( !isPriv ) content_make_public(rid); @@ -491,11 +488,11 @@ static void bundle_extract_item( int blobid, /* ID of the item to extract */ Blob *pOut /* Write the content into this blob */ ){ Stmt q; - Blob x, basis, h1, h2; + Blob x, basis, h1; static Bag busy; db_prepare(&q, "SELECT uuid, delta, data FROM bblob" " WHERE blobid=%d", blobid); if( db_step(&q)!=SQLITE_ROW ){ @@ -525,17 +522,14 @@ }else{ *pOut = x; } blob_zero(&h1); db_column_blob(&q, 0, &h1); - sha1sum_blob(pOut, &h2); - if( blob_compare(&h1, &h2)!=0 ){ - fossil_fatal("SHA1 hash mismatch - wanted %s, got %s", - blob_str(&h1), blob_str(&h2)); + if( hname_verify_hash(pOut, blob_buffer(&h1), blob_size(&h1))==0 ){ + fossil_fatal("incorrect hash for artifact %b", &h1); } blob_reset(&h1); - blob_reset(&h2); bag_remove(&busy, blobid); db_finalize(&q); } /* fossil bundle cat BUNDLE UUID... @@ -597,12 +591,13 @@ ** repo, then the delta encodings cannot be decoded and the bundle cannot ** be extracted. 
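The verification idiom used above in bundle_import_elements() and bundle_extract_item() can be summarized with a minimal sketch (not part of the patch; "content" and "h1" stand in for locals of those routines, and the HNAME_* codes are defined in the new src/hname.c later in this diff). The hash algorithm is inferred from the length of the stored hash, and the nonzero return value identifies which algorithm matched:

  Blob content;   /* artifact content, already uncompressed or delta-applied */
  Blob h1;        /* hex hash text already loaded from the bblob.uuid column */
  int alg = hname_verify_hash(&content, blob_buffer(&h1), blob_size(&h1));
  if( alg==HNAME_ERROR ){
    fossil_fatal("artifact hash error on %b", &h1);
  }else if( alg==HNAME_SHA1 ){
    /* artifact is named by a legacy 40-digit SHA1 */
  }else{
    /* alg==HNAME_K256: artifact is named by a 64-digit SHA3-256 */
  }

This is also why the sha1sum_blob()/blob_compare() pairs and their scratch Blob h2 disappear throughout this file.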
*/ zMissingDeltas = db_text(0, "SELECT group_concat(substr(delta,1,10),' ')" " FROM bblob" - " WHERE typeof(delta)='text' AND length(delta)=40" - " AND NOT EXISTS(SELECT 1 FROM blob WHERE uuid=bblob.delta)"); + " WHERE typeof(delta)='text' AND length(delta)>=%d" + " AND NOT EXISTS(SELECT 1 FROM blob WHERE uuid=bblob.delta)", + HNAME_MIN); if( zMissingDeltas && zMissingDeltas[0] ){ fossil_fatal("delta basis artifacts not found in repository: %s", zMissingDeltas); } Index: src/checkin.c ================================================================== --- src/checkin.c +++ src/checkin.c @@ -377,11 +377,11 @@ ** As a special case, the --no-merge option does not inhibit this default. ** This default shows exactly the set of changes that would be checked ** in by the commit command. ** ** If no filter options are used, or if the --merge option is used, the -** SHA1 hash of each merge contributor check-in version is displayed at +** artifact hash of each merge contributor check-in version is displayed at ** the end of the report. The --no-merge option is useful to display the ** default set of changed files without the merge contributors. ** ** If change type classification is enabled, each output line starts with ** a code describing the file's change type, e.g. EDITED or RENAMED. It @@ -412,11 +412,11 @@ ** ** General options: ** --abs-paths Display absolute pathnames. ** --rel-paths Display pathnames relative to the current working ** directory. -** --sha1sum Verify file status using SHA1 hashing rather than +** --hash Verify file status using hashing rather than ** relying on file mtimes. ** --case-sensitive Override case-sensitive setting. ** --dotfiles Include unmanaged files beginning with a dot. ** --ignore Ignore unmanaged files matching CSG glob patterns. ** --no-dir-symlinks Disables support for directory symlinks. @@ -464,11 +464,11 @@ {"no-merge", C_MERGE }, {"no-classify", C_CLASSIFY }, }; Blob report = BLOB_INITIALIZER; enum {CHANGES, STATUS} command = *g.argv[1]=='s' ? STATUS : CHANGES; - int useSha1sum = find_option("sha1sum", 0, 0)!=0; + int useHash = find_option("hash", 0, 0)!=0; int showHdr = command==CHANGES && find_option("header", 0, 0); int verboseFlag = command==CHANGES && find_option("verbose", "v", 0); const char *zIgnoreFlag = find_option("ignore", 0, 1); unsigned scanFlags = 0; unsigned flags = 0; @@ -528,11 +528,11 @@ /* We should be done with options. */ verify_all_options(); /* Check for changed files. */ - vfile_check_signature(vid, useSha1sum ? CKSIG_SHA1 : 0); + vfile_check_signature(vid, useHash ? CKSIG_HASH : 0); /* Search for unmanaged files if requested. */ if( flags & C_EXTRA ){ Glob *pIgnore = glob_create(zIgnoreFlag); locate_unmanaged_files(g.argc-2, g.argv+2, scanFlags, pIgnore); @@ -1985,12 +1985,12 @@ ** The --private option creates a private check-in that is never synced. ** Children of private check-ins are automatically private. ** ** The --tag option applies the symbolic tag name to the check-in. ** -** The --sha1sum option detects edited files by computing each file's -** SHA1 hash rather than just checking for changes to its size or mtime. +** The --hash option detects edited files by computing each file's +** artifact hash rather than just checking for changes to its size or mtime. ** ** Options: ** --allow-conflict allow unresolved merge conflicts ** --allow-empty allow a commit with no changes ** --allow-fork allow the commit to fork @@ -2010,11 +2010,11 @@ ** input and assumes an answer of 'No' for every ** question. 
** --no-warnings omit all warnings about file contents ** --nosign do not attempt to sign this commit with gpg ** --private do not sync changes and their descendants -** --sha1sum verify file status using SHA1 hashing rather +** --hash verify file status using hashing rather ** than relying on file mtimes ** --tag TAG-NAME assign given tag TAG-NAME to the check-in ** --date-override DATETIME DATE to use instead of 'now' ** --user-override USER USER to use instead of the current default ** @@ -2033,11 +2033,11 @@ int nvid; /* Blob-id of the new check-in */ Blob comment; /* Check-in comment */ const char *zComment; /* Check-in comment */ Stmt q; /* Various queries */ char *zUuid; /* UUID of the new check-in */ - int useSha1sum = 0; /* True to verify file status using SHA1 hashing */ + int useHash = 0; /* True to verify file status using hashing */ int noSign = 0; /* True to omit signing the manifest using GPG */ int isAMerge = 0; /* True if checking in a merge */ int noWarningFlag = 0; /* True if skipping all warnings */ int noPrompt = 0; /* True if skipping all prompts */ int forceFlag = 0; /* Undocumented: Disables all checks */ @@ -2068,11 +2068,11 @@ Blob ans; char cReply; memset(&sCiInfo, 0, sizeof(sCiInfo)); url_proxy_options(); - useSha1sum = find_option("sha1sum", 0, 0)!=0; + useHash = find_option("hash", 0, 0)!=0; noSign = find_option("nosign",0,0)!=0; forceDelta = find_option("delta",0,0)!=0; forceBaseline = find_option("baseline",0,0)!=0; if( forceDelta && forceBaseline ){ fossil_fatal("cannot use --delta and --baseline together"); @@ -2232,11 +2232,11 @@ */ if( !db_exists("SELECT 1 FROM user WHERE login=%Q", g.zLogin) ){ fossil_fatal("no such user: %s", g.zLogin); } - hasChanges = unsaved_changes(useSha1sum ? CKSIG_SHA1 : 0); + hasChanges = unsaved_changes(useHash ? CKSIG_HASH : 0); db_begin_transaction(); db_record_repository_filename(0); if( hasChanges==0 && !isAMerge && !allowEmpty && !forceFlag ){ fossil_fatal("nothing has changed; use --allow-empty to override"); } Index: src/content.c ================================================================== --- src/content.c +++ src/content.c @@ -312,11 +312,11 @@ /* ** COMMAND: artifact* ** ** Usage: %fossil artifact ARTIFACT-ID ?OUTPUT-FILENAME? ?OPTIONS? ** -** Extract an artifact by its SHA1 hash and write the results on +** Extract an artifact by its artifact hash and write the results on ** standard output, or if the optional 4th argument is given, in ** the named output file. ** ** Options: ** -R|--repository FILE Extract artifacts from repository FILE @@ -497,11 +497,11 @@ ** to be responsible for pBlob. This routine does *not* take over ** responsibility for freeing pBlob. */ int content_put_ex( Blob *pBlob, /* Content to add to the repository */ - const char *zUuid, /* SHA1 hash of reconstructed pBlob */ + const char *zUuid, /* artifact hash of reconstructed pBlob */ int srcId, /* pBlob is a delta from this entry */ int nBlob, /* pBlob is compressed. Original size is this */ int isPrivate /* The content should be marked private */ ){ int size; @@ -513,13 +513,23 @@ int isDephantomize = 0; assert( g.repositoryOpen ); assert( pBlob!=0 ); assert( srcId==0 || zUuid!=0 ); + db_begin_transaction(); if( zUuid==0 ){ assert( nBlob==0 ); - sha1sum_blob(pBlob, &hash); + /* First check the auxiliary hash to see if there is already an artifact + ** that uses the auxiliary hash name */ + hname_hash(pBlob, 1, &hash); + rid = fast_uuid_to_rid(blob_str(&hash)); + if( rid==0 ){ + /* No existing artifact with the auxiliary hash name. 
Therefore, use + ** the primary hash name. */ + blob_reset(&hash); + hname_hash(pBlob, 0, &hash); + } }else{ blob_init(&hash, zUuid, -1); } if( nBlob ){ size = nBlob; @@ -527,11 +537,10 @@ size = blob_size(pBlob); if( srcId ){ size = delta_output_size(blob_buffer(pBlob), size); } } - db_begin_transaction(); /* Check to see if the entry already exists and if it does whether ** or not the entry is a phantom */ db_prepare(&s1, "SELECT rid, size FROM blob WHERE uuid=%B", &hash); @@ -868,11 +877,10 @@ ** so forth, reporting any errors found. */ void test_integrity(void){ Stmt q; Blob content; - Blob cksum; int n1 = 0; int n2 = 0; int nErr = 0; int total; int nCA = 0; @@ -905,10 +913,11 @@ db_prepare(&q, "SELECT rid, uuid, size FROM blob ORDER BY rid"); total = db_int(0, "SELECT max(rid) FROM blob"); while( db_step(&q)==SQLITE_ROW ){ int rid = db_column_int(&q, 0); const char *zUuid = db_column_text(&q, 1); + int nUuid = db_column_bytes(&q, 1); int size = db_column_int(&q, 2); n1++; fossil_print(" %d/%d\r", n1, total); fflush(stdout); if( size<0 ){ @@ -919,14 +928,12 @@ if( blob_size(&content)!=size ){ fossil_print("size mismatch on artifact %d: wanted %d but got %d\n", rid, size, blob_size(&content)); nErr++; } - sha1sum_blob(&content, &cksum); - if( fossil_strcmp(blob_str(&cksum), zUuid)!=0 ){ - fossil_print("wrong hash on artifact %d: wanted %s but got %s\n", - rid, zUuid, blob_str(&cksum)); + if( !hname_verify_hash(&content, zUuid, nUuid) ){ + fossil_print("wrong hash on artifact %d\n",rid); nErr++; } if( bParse && looks_like_control_artifact(&content) ){ Blob err; int i, n; @@ -941,11 +948,11 @@ memcpy(zFirstLine, z, i); zFirstLine[i] = 0; p = manifest_parse(&content, 0, &err); if( p==0 ){ fossil_print("manifest_parse failed for %s:\n%s\n", - blob_str(&cksum), blob_str(&err)); + zUuid, blob_str(&err)); if( strncmp(blob_str(&err), "line 1:", 7)==0 ){ fossil_print("\"%s\"\n", zFirstLine); } }else{ anCA[p->type]++; @@ -954,11 +961,10 @@ } blob_reset(&err); }else{ blob_reset(&content); } - blob_reset(&cksum); n2++; } db_finalize(&q); fossil_print("%d non-phantom blobs (out of %d total) checked: %d errors\n", n2, n1, nErr); @@ -1157,11 +1163,11 @@ ** ** WARNING: You must run "fossil rebuild" after this command to rebuild ** the metadata. ** ** Note that the arguments are the integer raw RID values from the BLOB table, -** not SHA1 hashs or labels. +** not artifact hashs or labels. */ void test_content_erase(void){ int i; Blob x; char c; Index: src/db.c ================================================================== --- src/db.c +++ src/db.c @@ -2,11 +2,11 @@ ** Copyright (c) 2006 D. Richard Hipp ** ** This program is free software; you can redistribute it and/or ** modify it under the terms of the Simplified BSD License (also ** known as the "2-Clause License" or "FreeBSD License".) - +** ** This program is distributed in the hope that it will be useful, ** but without any warranty; without even the implied warranty of ** merchantability or fitness for a particular purpose. ** ** Author contact information: @@ -1486,34 +1486,14 @@ /* Cache "allow-symlinks" option, because we'll need it on every stat call */ g.allowSymlinks = db_get_boolean("allow-symlinks", db_allow_symlinks_by_default()); g.zAuxSchema = db_get("aux-schema",""); - /* Verify that the PLINK table has a new column added by the - ** 2014-11-28 schema change. Create it if necessary. This code - ** can be removed in the future, once all users have upgraded to the - ** 2014-11-28 or later schema. 
- */ - if( !db_table_has_column("repository","plink","baseid") ){ - db_multi_exec( - "ALTER TABLE repository.plink ADD COLUMN baseid;" - ); - } - - /* Verify that the MLINK table has the newer columns added by the - ** 2015-01-24 schema change. Create them if necessary. This code - ** can be removed in the future, once all users have upgraded to the - ** 2015-01-24 or later schema. - */ - if( !db_table_has_column("repository","mlink","isaux") ){ - db_begin_transaction(); - db_multi_exec( - "ALTER TABLE repository.mlink ADD COLUMN pmid INTEGER DEFAULT 0;" - "ALTER TABLE repository.mlink ADD COLUMN isaux BOOLEAN DEFAULT 0;" - ); - db_end_transaction(0); - } + /* If the ALIAS table is not present, then some on-the-fly schema + ** updates might be required. + */ + rebuild_schema_update_2_0(); /* Do the Fossil-2.0 schema updates */ } /* ** Flags for the db_find_and_open_repository() function. */ @@ -2082,29 +2062,29 @@ sqlite3_result_value(context, argv[2-rc]); } } /* -** Convert the input string into an SHA1. Make a notation in the +** Convert the input string into a artifact hash. Make a notation in the ** CONCEALED table so that the hash can be undo using the db_reveal() ** function at some later time. ** ** The value returned is stored in static space and will be overwritten ** on subsequent calls. ** -** If zContent is already a well-formed SHA1 hash, then return a copy +** If zContent is already a well-formed artifact hash, then return a copy ** of that hash, not a hash of the hash. ** ** The CONCEALED table is meant to obscure email addresses. Every valid ** email address will contain a "@" character and "@" is not valid within -** an SHA1 hash so there is no chance that a valid email address will go +** a SHA1 hash so there is no chance that a valid email address will go ** unconcealed. */ char *db_conceal(const char *zContent, int n){ - static char zHash[42]; + static char zHash[HNAME_MAX+1]; Blob out; - if( n==40 && validate16(zContent, n) ){ + if( hname_validate(zContent, n) ){ memcpy(zHash, zContent, n); zHash[n] = 0; }else{ sha1sum_step_text(zContent, n); sha1sum_finish(&out); Index: src/diff.c ================================================================== --- src/diff.c +++ src/diff.c @@ -2278,11 +2278,11 @@ ** URL: /annotate?checkin=ID&filename=FILENAME ** URL: /blame?checkin=ID&filename=FILENAME ** URL: /praise?checkin=ID&filename=FILENAME ** ** Show the most recent change to each line of a text file. /annotate shows -** the date of the changes and the check-in SHA1 hash (with a link to the +** the date of the changes and the check-in hash (with a link to the ** check-in). /blame and /praise also show the user who made the check-in. 
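Returning briefly to the db_conceal() change above: the test for input that is already a hash is now hname_validate() rather than a hard-coded 40-character check, so both SHA1 and SHA3-256 names pass through unconcealed, while everything else is still reduced to a SHA1. A hypothetical call (the email address is illustrative only):

  const char *zEmail = "user@example.com";
  const char *zHash = db_conceal(zEmail, (int)strlen(zEmail));
  /* zHash is a 40-digit SHA1 of zEmail, recorded in CONCEALED so that
  ** db_reveal() can later map it back.  Had zEmail already been a valid
  ** 40- or 64-digit artifact hash, it would have been returned verbatim. */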
** ** Query parameters: ** ** checkin=ID The manifest ID at which to start the annotation Index: src/doc.c ================================================================== --- src/doc.c +++ src/doc.c @@ -527,11 +527,11 @@ ** WEBPAGE: uv ** WEBPAGE: doc ** URL: /uv/FILE ** URL: /doc/CHECKIN/FILE ** -** CHECKIN can be either tag or SHA1 hash or timestamp identifying a +** CHECKIN can be either tag or hash prefix or timestamp identifying a ** particular check, or the name of a branch (meaning the most recent ** check-in on that branch) or one of various magic words: ** ** "tip" means the most recent check-in ** Index: src/event.c ================================================================== --- src/event.c +++ src/event.c @@ -384,11 +384,11 @@ rid = db_int(0, "SELECT rid FROM tagxref" " WHERE tagid=(SELECT tagid FROM tag WHERE tagname GLOB '%q*')" " ORDER BY mtime DESC", zTag ); - if( rid && strlen(zId)<40 ){ + if( rid && strlen(zId)mark name<->fossil sha1. */ + ** valid mapping from git hash<->mark name<->fossil hash. */ unsigned int mid; if( type_=='c' ){ mid = COMMITMARK(mark->rid); } else{ Index: src/foci.c ================================================================== --- src/foci.c +++ src/foci.c @@ -28,11 +28,11 @@ ** The "schema" for the temp.foci table is: ** ** CREATE TABLE files_of_checkin( ** checkinID INTEGER, -- RID for the check-in manifest ** filename TEXT, -- Name of a file -** uuid TEXT, -- SHA1 hash of the file +** uuid TEXT, -- hash of the file ** previousName TEXT, -- Name of the file in previous check-in ** perm TEXT, -- Permissions on the file ** symname TEXT HIDDEN -- Symbolic name of the check-in. ** ); ** @@ -54,11 +54,11 @@ */ static const char zFociSchema[] = @ CREATE TABLE files_of_checkin( @ checkinID INTEGER, -- RID for the check-in manifest @ filename TEXT, -- Name of a file -@ uuid TEXT, -- SHA1 hash of the file +@ uuid TEXT, -- hash of the file @ previousName TEXT, -- Name of the file in previous check-in @ perm TEXT, -- Permissions on the file @ symname TEXT HIDDEN -- Symbolic name of the check-in @ ); ; Index: src/fusefs.c ================================================================== --- src/fusefs.c +++ src/fusefs.c @@ -294,11 +294,11 @@ ** This command uses the Fuse Filesystem (FuseFS) to mount a directory ** at DIRECTORY that contains the content of all check-ins in the ** repository. The names of files are DIRECTORY/checkins/VERSION/PATH ** where DIRECTORY is the root of the mount, VERSION is any valid ** check-in name (examples: "trunk" or "tip" or a tag or any unique -** prefix of a SHA1 hash, etc) and PATH is the pathname of the file in +** prefix of an artifact hash, etc) and PATH is the pathname of the file in ** the check-in. If DIRECTORY does not exist, then an attempt is made ** to create it. ** ** The DIRECTORY/checkins directory is not searchable so one cannot ** do "ls DIRECTORY/checkins" to get a listing of all possible check-in Index: src/graph.c ================================================================== --- src/graph.c +++ src/graph.c @@ -179,11 +179,11 @@ int rid, /* RID for the check-in */ int nParent, /* Number of parents */ int *aParent, /* Array of parents */ const char *zBranch, /* Branch for this check-in */ const char *zBgClr, /* Background color. NULL or "" for white. 
*/ - const char *zUuid, /* SHA1 hash of the object being graphed */ + const char *zUuid, /* hash name of the object being graphed */ int isLeaf /* True if this row is a leaf */ ){ GraphRow *pRow; int nByte; ADDED src/hname.c Index: src/hname.c ================================================================== --- /dev/null +++ src/hname.c @@ -0,0 +1,157 @@ +/* +** Copyright (c) 2017 D. Richard Hipp +** +** This program is free software; you can redistribute it and/or +** modify it under the terms of the Simplified BSD License (also +** known as the "2-Clause License" or "FreeBSD License".) +** +** This program is distributed in the hope that it will be useful, +** but without any warranty; without even the implied warranty of +** merchantability or fitness for a particular purpose. +** +** Author contact information: +** drh@hwaci.com +** http://www.hwaci.com/drh/ +** +******************************************************************************* +** +** This file contains generic code for dealing with hashes used for +** naming artifacts. Specific hash algorithms are implemented separately +** (for example in sha1.c and sha3.c). This file contains the generic +** interface code. +*/ +#include "config.h" +#include "hname.h" + + +#if INTERFACE +/* +** Code numbers for the allowed hash algorithms. +*/ +#define HNAME_ERROR 0 /* Not a valid hash */ +#define HNAME_SHA1 1 /* SHA1 */ +#define HNAME_K256 2 /* SHA3-256 */ + +/* +** Minimum and maximum lengths for a hash value when hex encoded. +*/ +#define HNAME_MIN 40 /* Length for SHA1 */ +#define HNAME_MAX 64 /* Length for SHA3-256 */ + +/* +** Hash lengths for the various algorithms +*/ +#define HNAME_LEN_SHA1 40 +#define HNAME_LEN_K256 64 + +/* +** The number of distinct hash algorithms: +*/ +#define HNAME_COUNT 2 /* Just SHA1 and SHA3-256. Let's keep it that way! */ + +#endif /* INTERFACE */ + +/* +** Return the integer hash algorithm code number (ex: HNAME_K224) for +** the hash string provided. Or return HNAME_ERROR (0) if the input string +** is not a valid artifact hash string. +*/ +int hname_validate(const char *zHash, int nHash){ + int id; + switch( nHash ){ + case HNAME_LEN_SHA1: id = HNAME_SHA1; break; + case HNAME_LEN_K256: id = HNAME_K256; break; + default: return HNAME_ERROR; + } + if( !validate16(zHash, nHash) ) return HNAME_ERROR; + return id; +} + +/* +** Verify that zHash is a valid hash for the content in pContent. +** Return true if the hash is correct. Return false if the content +** does not match the hash. +** +** Actually, the returned value is one of the hash algorithm constants +** corresponding to the hash that matched if the hash is correct. +** (Examples: HNAME_SHA1 or HNAME_K224). And the return is HNAME_ERROR +** if the hash does not match. +*/ +int hname_verify_hash(Blob *pContent, const char *zHash, int nHash){ + int id = HNAME_ERROR; + switch( nHash ){ + case HNAME_LEN_SHA1: { + Blob hash; + sha1sum_blob(pContent, &hash); + if( memcmp(blob_buffer(&hash),zHash,HNAME_LEN_SHA1)==0 ) id = HNAME_SHA1; + blob_reset(&hash); + break; + } + case HNAME_LEN_K256: { + sha3sum_init(256); + sha3sum_step_blob(pContent); + if( memcmp(sha3sum_finish(0),zHash,64)==0 ) id = HNAME_K256; + break; + } + } + return id; +} + +/* +** Verify that zHash is a valid hash for the content of a file on +** disk named zFile. +** +** Return true if the hash is correct. Return false if the content +** does not match the hash. 
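Because this file is new in the patch, a short usage sketch may help; the helper below is hypothetical and not part of hname.c. The validation and verification routines all key off the hash length, and their nonzero return values double as algorithm identifiers, which lets callers treat "is it valid" and "which algorithm" as a single question:

  #include <string.h>

  /* Classify a user-supplied artifact name by hash algorithm (sketch). */
  static const char *hname_algo_label(const char *zName){
    switch( hname_validate(zName, (int)strlen(zName)) ){
      case HNAME_SHA1: return "SHA1 (40 hex digits)";
      case HNAME_K256: return "SHA3-256 (64 hex digits)";
      default:         return "not a valid artifact hash";  /* HNAME_ERROR */
    }
  }

The same convention applies to hname_verify_hash() above and to hname_hash() at the end of the file, where iHType==0 selects the build's preferred algorithm and iHType==1 the auxiliary one; content_put_ex() earlier in this diff looks for an existing artifact under the auxiliary name first and otherwise names new content with the preferred hash.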
+** +** Actually, the returned value is one of the hash algorithm constants +** corresponding to the hash that matched if the hash is correct. +** (Examples: HNAME_SHA1 or HNAME_K224). And the return is HNAME_ERROR +** if the hash does not match. +*/ +int hname_verify_file_hash(const char *zFile, const char *zHash, int nHash){ + int id = HNAME_ERROR; + switch( nHash ){ + case HNAME_LEN_SHA1: { + Blob hash; + sha1sum_file(zFile, &hash); + if( memcmp(blob_buffer(&hash),zHash,HNAME_LEN_SHA1)==0 ) id = HNAME_SHA1; + blob_reset(&hash); + break; + } + case HNAME_LEN_K256: { + Blob hash; + sha3sum_file(zFile, 256, &hash); + if( memcmp(blob_buffer(&hash),zHash,64)==0 ) id = HNAME_LEN_K256; + blob_reset(&hash); + break; + } + } + return id; +} + +/* +** Compute a hash on blob pContent. Write the hash into blob pHashOut. +** This routine assumes that pHashOut is uninitialized. +** +** The preferred hash is used for iHType==0, and various alternative hashes +** are used for iHType>0 && iHType=20100 + /* For Fossil 2.1 and later, the preferred hash algorithm is SHA3-256 and + ** SHA1 is the secondary hash algorithm. */ + switch( iHType ){ + case 0: sha3sum_blob(pContent, 256, pHashOut); break; + case 1: sha1sum_blob(pContent, pHashOut); break; + } +#else + /* Prior to Fossil 2.1, the preferred hash algorithm is SHA1 (for backwards + ** compatibility with Fossil 1.x) and SHA3-256 is the only auxiliary + ** algorithm */ + switch( iHType ){ + case 0: sha1sum_blob(pContent, pHashOut); break; + case 1: sha3sum_blob(pContent, 256, pHashOut); break; + } +#endif +} Index: src/import.c ================================================================== --- src/import.c +++ src/import.c @@ -149,18 +149,18 @@ ** UUID in gg.zPrevCheckin. */ static int fast_insert_content( Blob *pContent, /* Content to insert */ const char *zMark, /* Label using this mark, if not NULL */ - int saveUuid, /* Save SHA1 hash in gg.zPrevCheckin */ + int saveUuid, /* Save artifact hash in gg.zPrevCheckin */ int doParse /* Invoke manifest_crosslink() */ ){ Blob hash; Blob cmpr; int rid; - sha1sum_blob(pContent, &hash); + hname_hash(pContent, 0, &hash); rid = db_int(0, "SELECT rid FROM blob WHERE uuid=%B", &hash); if( rid==0 ){ static Stmt ins; db_static_prepare(&ins, "INSERT INTO blob(uuid, size, content) VALUES(:uuid, :size, :content)" Index: src/info.c ================================================================== --- src/info.c +++ src/info.c @@ -1385,10 +1385,11 @@ " ORDER BY mtime DESC /*sort*/", rid ); while( db_step(&q)==SQLITE_ROW ){ const char *zTarget = db_column_text(&q, 0); + int nTarget = db_column_bytes(&q, 0); const char *zFilename = db_column_text(&q, 1); const char *zDate = db_column_text(&q, 2); const char *zUser = db_column_text(&q, 3); /* const char *zSrc = db_column_text(&q, 4); */ if( cnt>0 ){ @@ -1395,11 +1396,11 @@ @ Also attachment "%h(zFilename)" to }else{ @ Attachment "%h(zFilename)" to } objType |= OBJTYPE_ATTACHMENT; - if( strlen(zTarget)==UUID_SIZE && validate16(zTarget,UUID_SIZE) ){ + if( nTarget==UUID_SIZE && validate16(zTarget,UUID_SIZE) ){ if ( db_exists("SELECT 1 FROM tag WHERE tagname='tkt-%q'", zTarget) ){ if( g.perm.Hyperlink && g.anon.RdTkt ){ @ ticket [%z(href("%R/tktview?name=%!S",zTarget))%S(zTarget)] Index: src/main.c ================================================================== --- src/main.c +++ src/main.c @@ -50,11 +50,16 @@ # include "cson_amalgamation.h" /* JSON API. */ # include "json_detail.h" #endif /* -** Size of a UUID in characters +** Size of a UUID in characters. 
A UUID is a randomly generated +** lower-case hexadecimal number used to identify tickets. +** +** In Fossil 1.x, UUID also referred to a SHA1 artifact hash. But that +** usage is now obsolete. The term UUID should now mean only a very large +** random number used as a unique identifier for tickets or other objects. */ #define UUID_SIZE 40 /* ** Maximum number of auxiliary parameters on reports Index: src/main.mk ================================================================== --- src/main.mk +++ src/main.mk @@ -52,10 +52,11 @@ $(SRCDIR)/fshell.c \ $(SRCDIR)/fusefs.c \ $(SRCDIR)/glob.c \ $(SRCDIR)/graph.c \ $(SRCDIR)/gzip.c \ + $(SRCDIR)/hname.c \ $(SRCDIR)/http.c \ $(SRCDIR)/http_socket.c \ $(SRCDIR)/http_ssl.c \ $(SRCDIR)/http_transport.c \ $(SRCDIR)/import.c \ @@ -228,10 +229,11 @@ $(OBJDIR)/fshell_.c \ $(OBJDIR)/fusefs_.c \ $(OBJDIR)/glob_.c \ $(OBJDIR)/graph_.c \ $(OBJDIR)/gzip_.c \ + $(OBJDIR)/hname_.c \ $(OBJDIR)/http_.c \ $(OBJDIR)/http_socket_.c \ $(OBJDIR)/http_ssl_.c \ $(OBJDIR)/http_transport_.c \ $(OBJDIR)/import_.c \ @@ -353,10 +355,11 @@ $(OBJDIR)/fshell.o \ $(OBJDIR)/fusefs.o \ $(OBJDIR)/glob.o \ $(OBJDIR)/graph.o \ $(OBJDIR)/gzip.o \ + $(OBJDIR)/hname.o \ $(OBJDIR)/http.o \ $(OBJDIR)/http_socket.o \ $(OBJDIR)/http_ssl.o \ $(OBJDIR)/http_transport.o \ $(OBJDIR)/import.o \ @@ -639,10 +642,11 @@ $(OBJDIR)/fshell_.c:$(OBJDIR)/fshell.h \ $(OBJDIR)/fusefs_.c:$(OBJDIR)/fusefs.h \ $(OBJDIR)/glob_.c:$(OBJDIR)/glob.h \ $(OBJDIR)/graph_.c:$(OBJDIR)/graph.h \ $(OBJDIR)/gzip_.c:$(OBJDIR)/gzip.h \ + $(OBJDIR)/hname_.c:$(OBJDIR)/hname.h \ $(OBJDIR)/http_.c:$(OBJDIR)/http.h \ $(OBJDIR)/http_socket_.c:$(OBJDIR)/http_socket.h \ $(OBJDIR)/http_ssl_.c:$(OBJDIR)/http_ssl.h \ $(OBJDIR)/http_transport_.c:$(OBJDIR)/http_transport.h \ $(OBJDIR)/import_.c:$(OBJDIR)/import.h \ @@ -1041,10 +1045,18 @@ $(OBJDIR)/gzip.o: $(OBJDIR)/gzip_.c $(OBJDIR)/gzip.h $(SRCDIR)/config.h $(XTCC) -o $(OBJDIR)/gzip.o -c $(OBJDIR)/gzip_.c $(OBJDIR)/gzip.h: $(OBJDIR)/headers + +$(OBJDIR)/hname_.c: $(SRCDIR)/hname.c $(OBJDIR)/translate + $(OBJDIR)/translate $(SRCDIR)/hname.c >$@ + +$(OBJDIR)/hname.o: $(OBJDIR)/hname_.c $(OBJDIR)/hname.h $(SRCDIR)/config.h + $(XTCC) -o $(OBJDIR)/hname.o -c $(OBJDIR)/hname_.c + +$(OBJDIR)/hname.h: $(OBJDIR)/headers $(OBJDIR)/http_.c: $(SRCDIR)/http.c $(OBJDIR)/translate $(OBJDIR)/translate $(SRCDIR)/http.c >$@ $(OBJDIR)/http.o: $(OBJDIR)/http_.c $(OBJDIR)/http.h $(SRCDIR)/config.h Index: src/makemake.tcl ================================================================== --- src/makemake.tcl +++ src/makemake.tcl @@ -58,10 +58,11 @@ fshell fusefs glob graph gzip + hname http http_socket http_transport import info Index: src/manifest.c ================================================================== --- src/manifest.c +++ src/manifest.c @@ -54,11 +54,11 @@ /* ** A single F-card within a manifest */ struct ManifestFile { char *zName; /* Name of a file */ - char *zUuid; /* UUID of the file */ + char *zUuid; /* Artifact hash for the file */ char *zPerm; /* File permissions */ char *zPrior; /* Prior name if the name was changed */ }; @@ -77,35 +77,35 @@ char *zRepoCksum; /* MD5 checksum of the baseline content. R card. */ char *zWiki; /* Text of the wiki page. W card. */ char *zWikiTitle; /* Name of the wiki page. L card. */ char *zMimetype; /* Mime type of wiki or comment text. N card. */ double rEventDate; /* Date of an event. E card. */ - char *zEventId; /* UUID for an event. E card. */ + char *zEventId; /* Artifact hash for an event. E card. */ char *zTicketUuid; /* UUID for a ticket. 
K card. */ char *zAttachName; /* Filename of an attachment. A card. */ - char *zAttachSrc; /* UUID of document being attached. A card. */ + char *zAttachSrc; /* Artifact hash for document being attached. A card. */ char *zAttachTarget; /* Ticket or wiki that attachment applies to. A card */ int nFile; /* Number of F cards */ int nFileAlloc; /* Slots allocated in aFile[] */ int iFile; /* Index of current file in iterator */ ManifestFile *aFile; /* One entry for each F-card */ int nParent; /* Number of parents. */ int nParentAlloc; /* Slots allocated in azParent[] */ - char **azParent; /* UUIDs of parents. One for each P card argument */ + char **azParent; /* Hashes of parents. One for each P card argument */ int nCherrypick; /* Number of entries in aCherrypick[] */ struct { - char *zCPTarget; /* UUID of cherry-picked version w/ +|- prefix */ - char *zCPBase; /* UUID of cherry-pick baseline. NULL for singletons */ + char *zCPTarget; /* Hash for cherry-picked version w/ +|- prefix */ + char *zCPBase; /* Hash for cherry-pick baseline. NULL for singletons */ } *aCherrypick; int nCChild; /* Number of cluster children */ int nCChildAlloc; /* Number of closts allocated in azCChild[] */ - char **azCChild; /* UUIDs of referenced objects in a cluster. M cards */ + char **azCChild; /* Hashes of referenced objects in a cluster. M cards */ int nTag; /* Number of T Cards */ int nTagAlloc; /* Slots allocated in aTag[] */ struct TagType { char *zName; /* Name of the tag */ - char *zUuid; /* UUID that the tag is applied to */ + char *zUuid; /* Hash of artifact that the tag is applied to */ char *zValue; /* Value if the tag is really a property */ } *aTag; /* One for each T card */ int nField; /* Number of J cards */ int nFieldAlloc; /* Slots allocated in aField[] */ struct { @@ -327,26 +327,27 @@ ** takes over the input blob and will free it when the ** Manifest object is freed. Zeros are inserted into the blob ** as string terminators so that blob should not be used again. ** ** Return a pointer to an allocated Manifest object if the content -** really is a control file of some kind. This object needs to be -** freed by a subsequent call to manifest_destroy(). Return NULL -** if there are syntax errors. +** really is a structural artifact of some kind. The returned Manifest +** object needs to be freed by a subsequent call to manifest_destroy(). +** Return NULL if there are syntax errors or if the input blob does +** not describe a valid structural artifact. ** -** This routine is strict about the format of a control file. +** This routine is strict about the format of a structural artifacts. ** The format must match exactly or else it is rejected. This -** rule minimizes the risk that a content file will be mistaken -** for a control file simply because they look the same. +** rule minimizes the risk that a content artifact will be mistaken +** for a structural artifact simply because they look the same. ** ** The pContent is reset. If a pointer is returned, then pContent will ** be reset when the Manifest object is cleared. If NULL is ** returned then the Manifest object is cleared automatically ** and pContent is reset before the return. ** -** The entire file can be PGP clear-signed. The signature is ignored. -** The file consists of zero or more cards, one card per line. +** The entire input blob can be PGP clear-signed. The signature is ignored. +** The artifact consists of zero or more cards, one card per line. ** (Except: the content of the W card can extend of multiple lines.) 
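One substitution recurs throughout the card handlers that follow, so it is worth stating once up front (a condensed contrast, not literal lines from either version; zUuid, sz and SYNTAX are the names used by the parser below):

  /* Fossil 1.x accepted only 40-digit SHA1 names on a card: */
  if( sz!=UUID_SIZE || !validate16(zUuid, UUID_SIZE) ){
    SYNTAX("invalid hash");
  }

  /* Fossil 2.0 accepts any supported hash length, 40 or 64 hex digits: */
  if( !hname_validate(zUuid, sz) ){
    SYNTAX("invalid hash");
  }

The effect is that A-, B-, E-, F-, M-, P-, Q- and T-cards may carry either SHA1 or SHA3-256 artifact names.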
** Each card is divided into tokens by a single space character. ** The first token is a single upper-case letter which is the card type. ** The card type determines the other parameters to the card. ** Cards must occur in lexicographical order. @@ -361,11 +362,10 @@ char *z; int n; char *zUuid; int sz = 0; int isRepeat, hasSelfRefTag = 0; - Blob bUuid = BLOB_INITIALIZER; static Bag seen; const char *zErr = 0; if( rid==0 ){ isRepeat = 1; @@ -374,11 +374,11 @@ }else{ isRepeat = 0; bag_insert(&seen, rid); } - /* Every control artifact ends with a '\n' character. Exit early + /* Every structural artifact ends with a '\n' character. Exit early ** if that is not the case for this artifact. */ if( !isRepeat ) g.parseCnt[0]++; z = blob_materialize(pContent); n = blob_size(pContent); @@ -406,15 +406,10 @@ blob_reset(pContent); blob_appendf(pErr, "incorrect Z-card cksum"); return 0; } - /* Store the UUID (before modifying the blob) only for error - ** reporting purposes. - */ - sha1sum_blob(pContent, &bUuid); - /* Allocate a Manifest object to hold the parsed control artifact. */ p = fossil_malloc( sizeof(*p) ); memset(p, 0, sizeof(*p)); memcpy(&p->content, pContent, sizeof(p->content)); @@ -449,15 +444,15 @@ defossilize(zName); if( !file_is_simple_pathname(zName, 0) ){ SYNTAX("invalid filename on A-card"); } defossilize(zTarget); - if( (nTarget!=UUID_SIZE || !validate16(zTarget, UUID_SIZE)) + if( !hname_validate(zTarget,nTarget) && !wiki_name_is_wellformed((const unsigned char *)zTarget) ){ SYNTAX("invalid target on A-card"); } - if( zSrc && (nSrc!=UUID_SIZE || !validate16(zSrc, UUID_SIZE)) ){ + if( zSrc && !hname_validate(zSrc,nSrc) ){ SYNTAX("invalid source on A-card"); } p->zAttachName = (char*)file_tail(zName); p->zAttachSrc = zSrc; p->zAttachTarget = zTarget; @@ -465,18 +460,18 @@ } /* ** B ** - ** A B-line gives the UUID for the baseline of a delta-manifest. + ** A B-line gives the artifact hash for the baseline of a delta-manifest. */ case 'B': { if( p->zBaseline ) SYNTAX("more than one B-card"); p->zBaseline = next_token(&x, &sz); - if( p->zBaseline==0 ) SYNTAX("missing UUID on B-card"); - if( sz!=UUID_SIZE || !validate16(p->zBaseline, UUID_SIZE) ){ - SYNTAX("invalid UUID on B-card"); + if( p->zBaseline==0 ) SYNTAX("missing hash on B-card"); + if( !hname_validate(p->zBaseline,sz) ){ + SYNTAX("invalid hash on B-card"); } break; } @@ -522,12 +517,12 @@ case 'E': { if( p->rEventDate>0.0 ) SYNTAX("more than one E-card"); p->rEventDate = db_double(0.0,"SELECT julianday(%Q)", next_token(&x,0)); if( p->rEventDate<=0.0 ) SYNTAX("malformed date on E-card"); p->zEventId = next_token(&x, &sz); - if( sz!=UUID_SIZE || !validate16(p->zEventId, UUID_SIZE) ){ - SYNTAX("malformed UUID on E-card"); + if( !hname_validate(p->zEventId, sz) ){ + SYNTAX("malformed hash on E-card"); } break; } /* @@ -545,12 +540,13 @@ if( !file_is_simple_pathname(zName, 0) ){ SYNTAX("F-card filename is not a simple path"); } zUuid = next_token(&x, &sz); if( p->zBaseline==0 || zUuid!=0 ){ - if( sz!=UUID_SIZE ) SYNTAX("F-card UUID is the wrong size"); - if( !validate16(zUuid, UUID_SIZE) ) SYNTAX("F-card UUID invalid"); + if( !hname_validate(zUuid,sz) ){ + SYNTAX("F-card hash invalid"); + } } zPerm = next_token(&x,0); zPriorName = next_token(&x,0); if( zPriorName ){ defossilize(zPriorName); @@ -636,20 +632,21 @@ } break; } /* - ** M + ** M ** - ** An M-line identifies another artifact by its UUID. M-lines + ** An M-line identifies another artifact by its hash. M-lines ** occur in clusters only. 
*/ case 'M': { zUuid = next_token(&x, &sz); - if( zUuid==0 ) SYNTAX("missing UUID on M-card"); - if( sz!=UUID_SIZE ) SYNTAX("wrong size for UUID on M-card"); - if( !validate16(zUuid, UUID_SIZE) ) SYNTAX("UUID invalid on M-card"); + if( zUuid==0 ) SYNTAX("missing hash on M-card"); + if( !hname_validate(zUuid,sz) ){ + SYNTAX("Invalid hash on M-card"); + } if( p->nCChild>=p->nCChildAlloc ){ p->nCChildAlloc = p->nCChildAlloc*2 + 10; p->azCChild = fossil_realloc(p->azCChild , p->nCChildAlloc*sizeof(p->azCChild[0]) ); } @@ -683,12 +680,13 @@ ** check-in historically has an empty P-card, so empty P-cards ** must be accepted. */ case 'P': { while( (zUuid = next_token(&x, &sz))!=0 ){ - if( sz!=UUID_SIZE ) SYNTAX("wrong size UUID on P-card"); - if( !validate16(zUuid, UUID_SIZE) )SYNTAX("invalid UUID on P-card"); + if( !hname_validate(zUuid, sz) ){ + SYNTAX("invalid hash on P-card"); + } if( p->nParent>=p->nParentAlloc ){ p->nParentAlloc = p->nParentAlloc*2 + 5; p->azParent = fossil_realloc(p->azParent, p->nParentAlloc*sizeof(char*)); } @@ -703,29 +701,25 @@ ** ** Specify one or a range of check-ins that are cherrypicked into ** this check-in ("+") or backed out of this check-in ("-"). */ case 'Q': { - if( (zUuid=next_token(&x, &sz))==0 ) SYNTAX("missing UUID on Q-card"); - if( sz!=UUID_SIZE+1 ) SYNTAX("wrong size UUID on Q-card"); + if( (zUuid=next_token(&x, &sz))==0 ) SYNTAX("missing hash on Q-card"); if( zUuid[0]!='+' && zUuid[0]!='-' ){ SYNTAX("Q-card does not begin with '+' or '-'"); } - if( !validate16(&zUuid[1], UUID_SIZE) ){ - SYNTAX("invalid UUID on Q-card"); + if( !hname_validate(&zUuid[1], sz-1) ){ + SYNTAX("invalid hash on Q-card"); } n = p->nCherrypick; p->nCherrypick++; p->aCherrypick = fossil_realloc(p->aCherrypick, p->nCherrypick*sizeof(p->aCherrypick[0])); p->aCherrypick[n].zCPTarget = zUuid; p->aCherrypick[n].zCPBase = zUuid = next_token(&x, &sz); - if( zUuid ){ - if( sz!=UUID_SIZE ) SYNTAX("wrong size second UUID in Q-card"); - if( !validate16(zUuid, UUID_SIZE) ){ - SYNTAX("invalid second UUID on Q-card"); - } + if( zUuid && !hname_validate(zUuid,sz) ){ + SYNTAX("invalid second hash on Q-card"); } break; } /* @@ -760,32 +754,32 @@ case 'T': { char *zName, *zValue; zName = next_token(&x, 0); if( zName==0 ) SYNTAX("missing name on T-card"); zUuid = next_token(&x, &sz); - if( zUuid==0 ) SYNTAX("missing UUID on T-card"); + if( zUuid==0 ) SYNTAX("missing artifact hash on T-card"); zValue = next_token(&x, 0); if( zValue ) defossilize(zValue); - if( sz==UUID_SIZE && validate16(zUuid, UUID_SIZE) ){ - /* A valid uuid */ + if( hname_validate(zUuid, sz) ){ + /* A valid artifact hash */ if( p->zEventId ) SYNTAX("non-self-referential T-card in event"); }else if( sz==1 && zUuid[0]=='*' ){ zUuid = 0; hasSelfRefTag = 1; if( p->zEventId && zName[0]!='+' ){ SYNTAX("propagating T-card in event"); } }else{ - SYNTAX("malformed UUID on T-card"); + SYNTAX("malformed artifact hash on T-card"); } defossilize(zName); if( zName[0]!='-' && zName[0]!='+' && zName[0]!='*' ){ SYNTAX("T-card name does not begin with '-', '+', or '*'"); } if( validate16(&zName[1], strlen(&zName[1])) ){ - /* Do not allow tags whose names look like UUIDs */ - SYNTAX("T-card name looks like a UUID"); + /* Do not allow tags whose names look like a hash */ + SYNTAX("T-card name looks like a hexadecimal hash"); } if( p->nTag>=p->nTagAlloc ){ p->nTagAlloc = p->nTagAlloc*2 + 10; p->aTag = fossil_realloc(p->aTag, p->nTagAlloc*sizeof(p->aTag[0]) ); } @@ -951,17 +945,19 @@ if( !seenZ ) SYNTAX("missing Z-card on control"); p->type = 
CFTYPE_CONTROL; } md5sum_init(); if( !isRepeat ) g.parseCnt[p->type]++; - blob_reset(&bUuid); return p; manifest_syntax_error: - if(bUuid.nUsed){ - blob_appendf(pErr, "manifest [%.40s] ", blob_str(&bUuid)); - blob_reset(&bUuid); + { + char *zUuid = db_text(0, "SELECT uuid FROM blob WHERE rid=%d", rid); + if( zUuid ){ + blob_appendf(pErr, "manifest [%s] ", zUuid); + fossil_free(zUuid); + } } if( zErr ){ blob_appendf(pErr, "line %d: %s", lineNo, zErr); }else{ blob_appendf(pErr, "unknown error on line %d", lineNo); @@ -1193,13 +1189,13 @@ ** then the mlink entry is only created if there is already an mlink ** from primary parent for the same file. */ static void add_one_mlink( int pmid, /* The parent manifest */ - const char *zFromUuid, /* UUID for content in parent */ + const char *zFromUuid, /* Artifact hash for content in parent */ int mid, /* The record ID of the manifest */ - const char *zToUuid, /* UUID for content in child */ + const char *zToUuid, /* artifact hash for content in child */ const char *zFilename, /* Filename */ const char *zPrior, /* Previous filename. NULL if unchanged */ int isPublic, /* True if mid is not a private manifest */ int isPrimary, /* pmid is the primary parent of mid */ int mperm /* 1: exec, 2: symlink */ @@ -1545,22 +1541,22 @@ } } /* ** For a check-in with RID "rid" that has nParent parent check-ins given -** by the UUIDs in azParent[], create all appropriate plink and mlink table +** by the hashes in azParent[], create all appropriate plink and mlink table ** entries. ** -** The primary parent is the first UUID on the azParent[] list. +** The primary parent is the first hash on the azParent[] list. ** ** Return the RID of the primary parent. */ static int manifest_add_checkin_linkages( int rid, /* The RID of the check-in */ Manifest *p, /* Manifest for this check-in */ int nParent, /* Number of parents for this check-in */ - char **azParent /* UUIDs for each parent */ + char **azParent /* hashes for each parent */ ){ int i; int parentid = 0; char zBaseId[30]; /* Baseline manifest RID for deltas. "NULL" otherwise */ Stmt q; @@ -1611,33 +1607,36 @@ return parentid; } /* ** There exists a "parent" tag against checkin rid that has value zValue. -** If value is well-formed (meaning that it is a list of UUIDs), then use +** If value is well-formed (meaning that it is a list of hashes), then use ** zValue to reparent check-in rid. 
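The rewritten manifest_reparent_checkin() below must cope with parent hashes of different lengths inside one space-separated value, so the fixed UUID_SIZE+1 stride of the old code no longer works. A stand-alone sketch of the scan (an illustrative helper only, not part of manifest.c):

  /* Count the parent hashes in a "parent" tag value, or return -1 if any
  ** token is not a valid artifact hash. */
  static int count_reparent_hashes(const char *zValue){
    int nParent = 0;
    while( *zValue ){
      int n = 0;
      while( zValue[n] && zValue[n]!=' ' ) n++;    /* measure next token */
      if( !hname_validate(zValue, n) ) return -1;  /* malformed parent   */
      nParent++;
      zValue += n;
      while( *zValue==' ' ) zValue++;              /* skip separator     */
    }
    return nParent;
  }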
*/ void manifest_reparent_checkin(int rid, const char *zValue){ - int nParent; + int nParent = 0; char *zCopy = 0; char **azParent = 0; Manifest *p = 0; - int i; + int i, j; int n = (int)strlen(zValue); - nParent = (n+1)/(UUID_SIZE+1); - if( nParent*(UUID_SIZE+1) - 1 !=n ) return; - if( nParent<1 ) return; + int mxParent = (n+1)/(HNAME_MIN+1); + + if( mxParent<1 ) return; zCopy = fossil_strdup(zValue); - azParent = fossil_malloc( sizeof(azParent[0])*nParent ); - for(i=0; imxParent ) goto reparent_abort; + for(j=HNAME_MIN; z[j]>' '; j++){} + if( !hname_validate(z, j) ) goto reparent_abort; + if( z[j]==0 ) break; + z[j] = 0; + i += j; + } + if( !db_exists("SELECT 1 FROM plink WHERE cid=%d AND pid=%d", rid, uuid_to_rid(azParent[0],0)) ){ p = manifest_get(rid, CFTYPE_MANIFEST, 0); } if( p!=0 ){ @@ -1647,10 +1646,11 @@ rid, rid ); manifest_add_checkin_linkages(rid,p,nParent,azParent); } manifest_destroy(p); +reparent_abort: fossil_free(azParent); fossil_free(zCopy); } /* @@ -2280,11 +2280,11 @@ int branchMove = 0; blob_zero(&comment); if( p->zComment ){ blob_appendf(&comment, " %s.", p->zComment); } - /* Next loop expects tags to be sorted on UUID, so sort it. */ + /* Next loop expects tags to be sorted on hash, so sort it. */ qsort(p->aTag, p->nTag, sizeof(p->aTag[0]), tag_compare); for(i=0; inTag; i++){ zTagUuid = p->aTag[i].zUuid; if( !zTagUuid ) continue; if( i==0 || fossil_strcmp(zTagUuid, p->aTag[i-1].zUuid)!=0 ){ Index: src/mkversion.c ================================================================== --- src/mkversion.c +++ src/mkversion.c @@ -12,11 +12,12 @@ #include int main(int argc, char *argv[]){ FILE *m,*u,*v; char *z; - int i, x, d; + int i, j, x, d; + int vn[3]; char b[1000]; char vx[1000]; memset(b,0,sizeof(b)); memset(vx,0,sizeof(vx)); u = fopen(argv[1],"r"); @@ -45,25 +46,25 @@ fclose(v); for(z=b; z[0] && z[0]!='\r' && z[0]!='\n'; z++){} *z = 0; printf("#define RELEASE_VERSION \"%s\"\n", b); x=0; - i=0; + i=j=0; z=b; + vn[0] = vn[1] = vn[2] = 0; while(1){ if( z[0]>='0' && z[0]<='9' ){ x = x*10 + z[0] - '0'; }else{ - sprintf(&vx[i],"%02d",x); - i += 2; + if( j<3 ) vn[j++] = x; x = 0; if( z[0]==0 ) break; } z++; } for(z=vx; z[0]=='0'; z++){} - printf("#define RELEASE_VERSION_NUMBER %s\n", z); + printf("#define RELEASE_VERSION_NUMBER %d%02d%02d\n", vn[0], vn[1], vn[2]); memset(vx,0,sizeof(vx)); strcpy(vx,b); d = 0; for(z=vx; z[0]; z++){ if( z[0]=='-' ){ Index: src/name.c ================================================================== --- src/name.c +++ src/name.c @@ -2,11 +2,11 @@ ** Copyright (c) 2006 D. Richard Hipp ** ** This program is free software; you can redistribute it and/or ** modify it under the terms of the Simplified BSD License (also ** known as the "2-Clause License" or "FreeBSD License".) - +** ** This program is distributed in the hope that it will be useful, ** but without any warranty; without even the implied warranty of ** merchantability or fitness for a particular purpose. ** ** Author contact information: @@ -13,15 +13,11 @@ ** drh@hwaci.com ** http://www.hwaci.com/drh/ ** ******************************************************************************* ** -** This file contains code used to convert user-supplied object names into -** canonical UUIDs. -** -** A user-supplied object name is any unique prefix of a valid UUID but -** not necessarily in canonical form. +** This file contains code used to resolved user-supplied object names. */ #include "config.h" #include "name.h" #include @@ -80,12 +76,12 @@ } /* ** Convert a symbolic name into a RID. 
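One detail of the mkversion.c change above deserves a worked example, since other files in this patch depend on it: the three version components are now emitted as %d%02d%02d, giving a single comparable integer.

  "1.37"  becomes  vn = {1,37,0}  giving  RELEASE_VERSION_NUMBER 13700
  "2.0"   becomes  vn = {2, 0,0}  giving  RELEASE_VERSION_NUMBER 20000
  "2.1"   becomes  vn = {2, 1,0}  giving  RELEASE_VERSION_NUMBER 20100

That is what allows hname.c to choose the preferred hash algorithm at compile time with a single integer test against 20100 (Fossil 2.1 or later).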
Acceptable forms: ** -** * SHA1 hash -** * SHA1 hash prefix of at least 4 characters +** * artifact hash +** * 4-character or larger prefix of a artifact ** * Symbolic Name ** * "tag:" + symbolic name ** * Date or date-time ** * "date:" + Date or date-time ** * symbolic-name ":" date-time @@ -105,11 +101,11 @@ ** If zType is "br" then find the first check-in of the named branch ** rather than the last. ** zType is "ci" in most use cases since we are usually searching for ** a check-in. ** -** Note that the input zTag for types "t" and "e" is the SHA1 hash of +** Note that the input zTag for types "t" and "e" is the artifact hash of ** the ticket-change or event-change artifact, not the randomly generated ** hexadecimal identifier assigned to tickets and events. Those identifiers ** live in a separate namespace. */ int symbolic_name_to_rid(const char *zTag, const char *zType){ @@ -232,14 +228,14 @@ zTagBase, zDate, zType ); return rid; } - /* SHA1 hash or prefix */ - if( nTag>=4 && nTag<=UUID_SIZE && validate16(zTag, nTag) ){ + /* artifact hash or prefix */ + if( nTag>=4 && nTag<=HNAME_MAX && validate16(zTag, nTag) ){ Stmt q; - char zUuid[UUID_SIZE+1]; + char zUuid[HNAME_MAX+1]; memcpy(zUuid, zTag, nTag+1); canonical16(zUuid, nTag); rid = 0; if( zType[0]=='*' ){ db_prepare(&q, "SELECT rid FROM blob WHERE uuid GLOB '%q*'", zUuid); @@ -354,11 +350,11 @@ */ int name_collisions(const char *zName){ int c = 0; /* count of collisions for zName */ int nLen; /* length of zName */ nLen = strlen(zName); - if( nLen>=4 && nLen<=UUID_SIZE && validate16(zName, nLen) ){ + if( nLen>=4 && nLen<=HNAME_MAX && validate16(zName, nLen) ){ c = db_int(0, "SELECT" " (SELECT count(*) FROM ticket" " WHERE tkt_uuid GLOB '%q*') +" " (SELECT count(*) FROM tag" @@ -396,11 +392,11 @@ /* ** Convert a name to a rid. If the name can be any of the various forms ** accepted: ** -** * SHA1 hash or prefix thereof +** * artifact hash or prefix thereof ** * symbolic name ** * date ** * label:date ** * prev, previous ** * next @@ -425,13 +421,13 @@ return name_to_typed_rid(zName, "*"); } /* ** WEBPAGE: ambiguous -** URL: /ambiguous?name=UUID&src=WEBPAGE +** URL: /ambiguous?name=NAME&src=WEBPAGE ** -** The UUID given by the name parameter is ambiguous. Display a page +** The NAME given by the name parameter is ambiguous. Display a page ** that shows all possible choices and let the user select between them. */ void ambiguous_page(void){ Stmt q; const char *zName = P("name"); @@ -668,11 +664,11 @@ /* ** COMMAND: whatis* ** ** Usage: %fossil whatis NAME ** -** Resolve the symbol NAME into its canonical 40-character SHA1-hash +** Resolve the symbol NAME into its canonical artifact hash ** artifact name and provide a description of what role that artifact ** plays. ** ** Options: ** @@ -744,11 +740,11 @@ /* ** COMMAND: test-ambiguous ** ** Usage: %fossil test-ambiguous [--minsize N] ** -** Show a list of ambiguous SHA1-hash abbreviations of N characters or +** Show a list of ambiguous artifact hash abbreviations of N characters or ** more where N defaults to 4. Change N to a different value using ** the "--minsize N" command-line option. 
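To condense the name-resolution change in symbolic_name_to_rid() above into one block (a sketch using the same helpers; zTag is the user-supplied name and rid the result, as in that function):

  int nTag = (int)strlen(zTag);
  if( nTag>=4 && nTag<=HNAME_MAX && validate16(zTag, nTag) ){
    char zUuid[HNAME_MAX+1];
    memcpy(zUuid, zTag, nTag+1);
    canonical16(zUuid, nTag);    /* put the prefix into canonical form */
    rid = db_int(0, "SELECT rid FROM blob WHERE uuid GLOB '%q*'", zUuid);
  }

The only substantive difference from Fossil 1.x is the upper bound: a prefix may now be as long as HNAME_MAX (64) characters instead of UUID_SIZE (40), so full SHA3-256 names resolve the same way SHA1 names always have.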
*/ void test_ambiguous_cmd(void){ Stmt q, ins; @@ -794,11 +790,11 @@ ** Schema for the description table */ static const char zDescTab[] = @ CREATE TEMP TABLE IF NOT EXISTS description( @ rid INTEGER PRIMARY KEY, -- RID of the object -@ uuid TEXT, -- SHA1 hash of the object +@ uuid TEXT, -- hash of the object @ ctime DATETIME, -- Time of creation @ isPrivate BOOLEAN DEFAULT 0, -- True for unpublished artifacts @ type TEXT, -- file, checkin, wiki, ticket, etc. @ summary TEXT, -- Summary comment for the object @ detail TEXT -- File name, check-in comment, etc @@ -1084,11 +1080,11 @@ " WHERE description.rid=blob.rid" " ORDER BY length(content) DESC" ); @ @ + @ @ while( db_step(&q)==SQLITE_ROW ){ int rid = db_column_int(&q,0); const char *zUuid = db_column_text(&q, 1); const char *zDesc = db_column_text(&q, 2); @@ -1147,33 +1143,33 @@ /* Maximum number of collision examples to remember */ #define MAX_COLLIDE 25 /* -** Generate a report on the number of collisions in SHA1 hashes +** Generate a report on the number of collisions in artifact hashes ** generated by the SQL given in the argument. */ static void collision_report(const char *zSql){ int i, j, kk; int nHash = 0; Stmt q; - char zPrev[UUID_SIZE+1]; + char zPrev[HNAME_MAX+1]; struct { int cnt; char *azHit[MAX_COLLIDE]; - char z[UUID_SIZE+1]; - } aCollide[UUID_SIZE+1]; + char z[HNAME_MAX+1]; + } aCollide[HNAME_MAX+1]; memset(aCollide, 0, sizeof(aCollide)); memset(zPrev, 0, sizeof(zPrev)); db_prepare(&q,"%s",zSql/*safe-for-%s*/); while( db_step(&q)==SQLITE_ROW ){ const char *zUuid = db_column_text(&q,0); int n = db_column_bytes(&q,0); int i; nHash++; for(i=0; zPrev[i] && zPrev[i]==zUuid[i]; i++){} - if( i>0 && i<=UUID_SIZE ){ + if( i>0 && i<=HNAME_MAX ){ if( i>=4 && aCollide[i].cnt @ @ - for(i=1; i<=UUID_SIZE; i++){ + for(i=1; i<=HNAME_MAX; i++){ if( aCollide[i].cnt==0 ) continue; @ } @
SizeRID - @ Delta FromSHA1DescriptionDate
Delta FromHashDescriptionDate
LengthInstancesFirst Instance
%d(i)%d(aCollide[i].cnt)%h(aCollide[i].z)
@

Total number of hashes: %d(nHash)

kk = 0; - for(i=UUID_SIZE; i>=4; i--){ + for(i=HNAME_MAX; i>=4; i--){ if( aCollide[i].cnt==0 ) continue; if( aCollide[i].cnt>200 ) break; kk += aCollide[i].cnt; if( aCollide[i].cnt<25 ){ @

Collisions of length %d(i): @@ -1219,11 +1215,11 @@ ** Show the number of hash collisions for hash prefixes of various lengths. */ void hash_collisions_webpage(void){ login_check_credentials(); if( !g.perm.Read ){ login_needed(g.anon.Read); return; } - style_header("SHA1 Prefix Collisions"); + style_header("Hash Prefix Collisions"); style_submenu_element("Activity Reports", "reports"); style_submenu_element("Stats", "stat"); @

Hash Prefix Collisions on Check-ins

collision_report("SELECT (SELECT uuid FROM blob WHERE rid=objid)" " FROM event WHERE event.type='ci'" Index: src/printf.c ================================================================== --- src/printf.c +++ src/printf.c @@ -24,11 +24,11 @@ # include # include #endif #include -/* Two custom conversions are used to show a prefix of SHA1 hashes: +/* Two custom conversions are used to show a prefix of artifact hashes: ** ** %!S Prefix of a length appropriate for URLs ** %S Prefix of a length appropriate for human display ** ** The following macros help determine those lengths. FOSSIL_HASH_DIGITS @@ -44,11 +44,11 @@ #ifndef FOSSIL_HASH_DIGITS_URL # define FOSSIL_HASH_DIGITS_URL 16 /* For %!S (embedded in URLs) */ #endif /* -** Return the number of SHA1 hash digits to display. The number is for +** Return the number of artifact hash digits to display. The number is for ** human output if the bForUrl is false and is destined for a URL if ** bForUrl is false. */ static int hashDigits(int bForUrl){ static int nDigitHuman = 0; Index: src/purge.c ================================================================== --- src/purge.c +++ src/purge.c @@ -43,11 +43,11 @@ @ ); @ CREATE TABLE IF NOT EXISTS "%w".purgeitem( @ piid INTEGER PRIMARY KEY, -- ID for the purge item @ peid INTEGER REFERENCES purgeevent ON DELETE CASCADE, -- Purge event @ orid INTEGER, -- Original RID before purged -@ uuid TEXT NOT NULL, -- SHA1 hash of the purged artifact +@ uuid TEXT NOT NULL, -- hash of the purged artifact @ srcid INTEGER, -- Basis purgeitem for delta compression @ isPrivate BOOLEAN, -- True if artifact was originally private @ sz INT NOT NULL, -- Uncompressed size of the purged artifact @ desc TEXT, -- Brief description of this artifact @ data BLOB -- Compressed artifact content @@ -347,11 +347,11 @@ int piid, /* ID of the item to extract */ Blob *pOut /* Write the content into this blob */ ){ Stmt q; int srcid; - Blob h1, h2, x; + Blob h1, x; static Bag busy; db_prepare(&q, "SELECT uuid, srcid, data FROM purgeitem" " WHERE piid=%d", piid); if( db_step(&q)!=SQLITE_ROW ){ @@ -376,17 +376,14 @@ blob_reset(&baseline); } bag_remove(&busy, piid); blob_zero(&h1); db_column_blob(&q, 0, &h1); - sha1sum_blob(pOut, &h2); - if( blob_compare(&h1, &h2)!=0 ){ - fossil_fatal("SHA1 hash mismatch - wanted %s, got %s", - blob_str(&h1), blob_str(&h2)); + if( hname_verify_hash(pOut, blob_buffer(&h1), blob_size(&h1))==0 ){ + fossil_fatal("incorrect artifact hash on %b", &h1); } blob_reset(&h1); - blob_reset(&h2); db_finalize(&q); return 0; } /* @@ -411,11 +408,11 @@ " WHERE ix.srcid=%d" " AND ix.piid=purgeitem.piid;", iSrc ); while( db_step(&q)==SQLITE_ROW ){ - Blob h1, h2, c1, c2; + Blob h1, c1, c2; int isPriv, rid; blob_zero(&h1); db_column_blob(&q, 0, &h1); blob_zero(&c1); db_column_blob(&q, 1, &c1); @@ -425,16 +422,13 @@ blob_delta_apply(pBasis, &c1, &c2); blob_reset(&c1); }else{ c2 = c1; } - sha1sum_blob(&c2, &h2); - if( blob_compare(&h1, &h2)!=0 ){ - fossil_fatal("SHA1 hash mismatch - wanted %s, got %s", - blob_str(&h1), blob_str(&h2)); + if( hname_verify_hash(&c2, blob_buffer(&h1), blob_size(&h1))==0 ){ + fossil_fatal("incorrect hash on %b", &h1); } - blob_reset(&h2); isPriv = db_column_int(&q, 2); rid = content_put_ex(&c2, blob_str(&h1), 0, 0, isPriv); if( rid==0 ){ fossil_fatal("%s", g.zErrMsg); }else{ Index: src/rebuild.c ================================================================== --- src/rebuild.c +++ src/rebuild.c @@ -21,76 +21,41 @@ #include "rebuild.h" #include #include /* -** Make changes to the stable part of 
the schema (the part that is not -** simply deleted and reconstructed on a rebuild) to bring the schema -** up to the latest. +** Update the schema as necessary */ -static const char zSchemaUpdates1[] = -@ -- Index on the delta table -@ -- -@ CREATE INDEX IF NOT EXISTS delta_i1 ON delta(srcid); -@ -@ -- Artifacts that should not be processed are identified in the -@ -- "shun" table. Artifacts that are control-file forgeries or -@ -- spam or artifacts whose contents violate administrative policy -@ -- can be shunned in order to prevent them from contaminating -@ -- the repository. -@ -- -@ -- Shunned artifacts do not exist in the blob table. Hence they -@ -- have not artifact ID (rid) and we thus must store their full -@ -- UUID. -@ -- -@ CREATE TABLE IF NOT EXISTS shun( -@ uuid UNIQUE, -- UUID of artifact to be shunned. Canonical form -@ mtime INTEGER, -- When added. Seconds since 1970 -@ scom TEXT -- Optional text explaining why the shun occurred -@ ); -@ -@ -- Artifacts that should not be pushed are stored in the "private" -@ -- table. -@ -- -@ CREATE TABLE IF NOT EXISTS private(rid INTEGER PRIMARY KEY); -@ -@ -- Some ticket content (such as the originators email address or contact -@ -- information) needs to be obscured to protect privacy. This is achieved -@ -- by storing an SHA1 hash of the content. For display, the hash is -@ -- mapped back into the original text using this table. -@ -- -@ -- This table contains sensitive information and should not be shared -@ -- with unauthorized users. -@ -- -@ CREATE TABLE IF NOT EXISTS concealed( -@ hash TEXT PRIMARY KEY, -- The SHA1 hash of content -@ mtime INTEGER, -- Time created. Seconds since 1970 -@ content TEXT -- Content intended to be concealed -@ ); -; -static const char zSchemaUpdates2[] = -@ -- An entry in this table describes a database query that generates a -@ -- table of tickets. -@ -- -@ CREATE TABLE IF NOT EXISTS reportfmt( -@ rn INTEGER PRIMARY KEY, -- Report number -@ owner TEXT, -- Owner of this report format (not used) -@ title TEXT UNIQUE, -- Title of this report -@ mtime INTEGER, -- Time last modified. Seconds since 1970 -@ cols TEXT, -- A color-key specification -@ sqlcode TEXT -- An SQL SELECT statement for this report -@ ); -; - static void rebuild_update_schema(void){ - int rc; - db_multi_exec("%s", zSchemaUpdates1 /*safe-for-%s*/); - db_multi_exec("%s", zSchemaUpdates2 /*safe-for-%s*/); + /* Verify that the PLINK table has a new column added by the + ** 2014-11-28 schema change. Create it if necessary. This code + ** can be removed in the future, once all users have upgraded to the + ** 2014-11-28 or later schema. + */ + if( !db_table_has_column("repository","plink","baseid") ){ + db_multi_exec( + "ALTER TABLE repository.plink ADD COLUMN baseid;" + ); + } + + /* Verify that the MLINK table has the newer columns added by the + ** 2015-01-24 schema change. Create them if necessary. This code + ** can be removed in the future, once all users have upgraded to the + ** 2015-01-24 or later schema. + */ + if( !db_table_has_column("repository","mlink","isaux") ){ + db_begin_transaction(); + db_multi_exec( + "ALTER TABLE repository.mlink ADD COLUMN pmid INTEGER DEFAULT 0;" + "ALTER TABLE repository.mlink ADD COLUMN isaux BOOLEAN DEFAULT 0;" + ); + db_end_transaction(0); + } - rc = db_exists("SELECT 1 FROM sqlite_master" - " WHERE name='user' AND sql GLOB '* mtime *'"); - if( rc==0 ){ + /* Add the user.mtime column if it is missing. 
(2011-04-27) + */ + if( !db_table_has_column("repository", "user", "mtime") ){ db_multi_exec( "CREATE TEMP TABLE temp_user AS SELECT * FROM user;" "DROP TABLE user;" "CREATE TABLE user(\n" " uid INTEGER PRIMARY KEY,\n" @@ -109,54 +74,99 @@ " ipaddr, cexpire, info, now(), photo FROM temp_user;" "DROP TABLE temp_user;" ); } - rc = db_exists("SELECT 1 FROM sqlite_master" - " WHERE name='config' AND sql GLOB '* mtime *'"); - if( rc==0 ){ + /* Add the config.mtime column if it is missing. (2011-04-27) + */ + if( !db_table_has_column("repository", "config", "mtime") ){ db_multi_exec( "ALTER TABLE config ADD COLUMN mtime INTEGER;" "UPDATE config SET mtime=now();" ); } - rc = db_exists("SELECT 1 FROM sqlite_master" - " WHERE name='shun' AND sql GLOB '* mtime *'"); - if( rc==0 ){ + /* Add the shun.mtime and shun.scom columns if they are missing. + ** (2011-04-27) + */ + if( !db_table_has_column("repository", "shun", "mtime") ){ db_multi_exec( "ALTER TABLE shun ADD COLUMN mtime INTEGER;" "ALTER TABLE shun ADD COLUMN scom TEXT;" "UPDATE shun SET mtime=now();" ); } - rc = db_exists("SELECT 1 FROM sqlite_master" - " WHERE name='reportfmt' AND sql GLOB '* mtime *'"); - if( rc==0 ){ + /* Add the reportfmt.mtime column if it is missing. (2011-04-27) + */ + if( !db_table_has_column("repository", "reportfmt", "mtime") ){ + static const char zCreateReportFmtTable[] = + @ -- An entry in this table describes a database query that generates a + @ -- table of tickets. + @ -- + @ CREATE TABLE IF NOT EXISTS reportfmt( + @ rn INTEGER PRIMARY KEY, -- Report number + @ owner TEXT, -- Owner of this report format (not used) + @ title TEXT UNIQUE, -- Title of this report + @ mtime INTEGER, -- Time last modified. Seconds since 1970 + @ cols TEXT, -- A color-key specification + @ sqlcode TEXT -- An SQL SELECT statement for this report + @ ); + ; db_multi_exec( "CREATE TEMP TABLE old_fmt AS SELECT * FROM reportfmt;" "DROP TABLE reportfmt;" ); - db_multi_exec("%s", zSchemaUpdates2/*safe-for-%s*/); + db_multi_exec("%s", zCreateReportFmtTable/*safe-for-%s*/); db_multi_exec( "INSERT OR IGNORE INTO reportfmt(rn,owner,title,cols,sqlcode,mtime)" " SELECT rn, owner, title, cols, sqlcode, now() FROM old_fmt;" "INSERT OR IGNORE INTO reportfmt(rn,owner,title,cols,sqlcode,mtime)" " SELECT rn, owner, title || ' (' || rn || ')', cols, sqlcode, now()" " FROM old_fmt;" ); } - rc = db_exists("SELECT 1 FROM sqlite_master" - " WHERE name='concealed' AND sql GLOB '* mtime *'"); - if( rc==0 ){ + /* Add the concealed.mtime column if it is missing. (2011-04-27) + */ + if( !db_table_has_column("repository", "concealed", "mtime") ){ db_multi_exec( "ALTER TABLE concealed ADD COLUMN mtime INTEGER;" "UPDATE concealed SET mtime=now();" ); } + + /* Do the fossil-2.0 updates to the schema. (2017-02-28) + */ + rebuild_schema_update_2_0(); +} + +/* +** Update the repository schema for Fossil version 2.0. (2017-02-28) +** (1) Change the CHECK constraint on BLOB.UUID so that the length +** is greater than or equal to 40, not exactly equal to 40. 
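As an illustrative sketch of the upgrade described above (not code from this change; it reuses db_text() and fossil_free() exactly as they appear in the function that follows), the old constraint can be detected in the stored schema text, and the fix amounts to a single character:

    /* Sketch: does this repository still carry the pre-2.0 constraint?
    **
    **   before the upgrade:  CHECK( length(uuid)==40 AND rid>0 )
    **   after the upgrade:   CHECK( length(uuid)>=40 AND rid>0 )
    */
    char *zSql = db_text(0,
        "SELECT sql FROM repository.sqlite_master WHERE name='blob'");
    if( zSql && strstr(zSql, "length(uuid)==40")!=0 ){
      /* still the 1.x schema; the one-character '=' to '>' edit applies */
    }
    fossil_free(zSql);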
+*/ +void rebuild_schema_update_2_0(void){ + char *z = db_text(0, "SELECT sql FROM repository.sqlite_master WHERE name='blob'"); + if( z ){ + /* Search for: length(uuid)==40 + ** 0123456789 12345 */ + int i; + for(i=10; z[i]; i++){ + if( z[i]=='=' && strncmp(&z[i-6],"(uuid)==40",10)==0 ){ + z[i] = '>'; + db_multi_exec( + "PRAGMA writable_schema=ON;" + "UPDATE repository.sqlite_master SET sql=%Q WHERE name LIKE 'blob';" + "PRAGMA writable_schema=OFF;", + z + ); + break; + } + } + fossil_free(z); + } } /* ** Variables used to store state information about an on-going "rebuild" ** or "deconstruct". @@ -330,37 +340,40 @@ ** extracted in a random order. This feature is used to test the ** ability of fossil to accept records in any order and still ** construct a sane repository. */ int rebuild_db(int randomize, int doOut, int doClustering){ - Stmt s; + Stmt s, q; int errCnt = 0; char *zTable; int incrSize; + Blob sql; bag_init(&bagDone); ttyOutput = doOut; processCnt = 0; if (ttyOutput && !g.fQuiet) { percent_complete(0); } rebuild_update_schema(); - for(;;){ - zTable = db_text(0, - "SELECT name FROM sqlite_master /*scan*/" - " WHERE type='table'" - " AND name NOT IN ('admin_log', 'blob','delta','rcvfrom','user'," - "'config','shun','private','reportfmt'," - "'concealed','accesslog','modreq'," - "'purgeevent','purgeitem','unversioned')" - " AND name NOT GLOB 'sqlite_*'" - " AND name NOT GLOB 'fx_*'" - ); - if( zTable==0 ) break; - db_multi_exec("DROP TABLE %Q", zTable); - free(zTable); - } + blob_init(&sql, 0, 0); + db_prepare(&q, + "SELECT name FROM sqlite_master /*scan*/" + " WHERE type='table'" + " AND name NOT IN ('admin_log', 'blob','delta','rcvfrom','user','alias'," + "'config','shun','private','reportfmt'," + "'concealed','accesslog','modreq'," + "'purgeevent','purgeitem','unversioned')" + " AND name NOT GLOB 'sqlite_*'" + " AND name NOT GLOB 'fx_*'" + ); + while( db_step(&q)==SQLITE_ROW ){ + blob_appendf(&sql, "DROP TABLE \"%w\";\n", db_column_text(&q,0)); + } + db_finalize(&q); + db_multi_exec("%s", blob_str(&sql)/*safe-for-%s*/); + blob_reset(&sql); db_multi_exec("%s", zRepositorySchema2/*safe-for-%s*/); ticket_create_table(0); shun_artifacts(); db_multi_exec( @@ -994,11 +1007,11 @@ ** ** ** This command exports all artifacts of a given repository and ** writes all artifacts to the file system. The DESTINATION directory ** will be populated with subdirectories AA and files AA/BBBBBBBBB.., where -** AABBBBBBBBB.. is the 40 character artifact ID, AA the first 2 characters. +** AABBBBBBBBB.. is the 40+ character artifact ID, AA the first 2 characters. ** If -L|--prefixlength is given, the length (default 2) of the directory ** prefix can be set to 0,1,..,9 characters. ** ** Options: ** -R|--repository REPOSITORY deconstruct given REPOSITORY Index: src/schema.c ================================================================== --- src/schema.c +++ src/schema.c @@ -79,13 +79,13 @@ @ -- @ CREATE TABLE blob( @ rid INTEGER PRIMARY KEY, -- Record ID @ rcvid INTEGER, -- Origin of this record @ size INTEGER, -- Size of content. -1 for a phantom. 
-@ uuid TEXT UNIQUE NOT NULL, -- SHA1 hash of the content +@ uuid TEXT UNIQUE NOT NULL, -- hash of the content @ content BLOB, -- Compressed content of this record -@ CHECK( length(uuid)==40 AND rid>0 ) +@ CHECK( length(uuid)>=40 AND rid>0 ) @ ); @ CREATE TABLE delta( @ rid INTEGER PRIMARY KEY, -- BLOB that is delta-compressed @ srcid INTEGER NOT NULL REFERENCES blob -- Baseline for delta-compression @ ); Index: src/sha1.c ================================================================== --- src/sha1.c +++ src/sha1.c @@ -1,6 +1,22 @@ /* +** Copyright (c) 2006 D. Richard Hipp +** +** This program is free software; you can redistribute it and/or +** modify it under the terms of the Simplified BSD License (also +** known as the "2-Clause License" or "FreeBSD License".) +** +** This program is distributed in the hope that it will be useful, +** but without any warranty; without even the implied warranty of +** merchantability or fitness for a particular purpose. +** +** Author contact information: +** drh@hwaci.com +** http://www.hwaci.com/drh/ +** +******************************************************************************* +** ** This implementation of SHA1. */ #include "config.h" #include #include "sha1.h" Index: src/sha3.c ================================================================== --- src/sha3.c +++ src/sha3.c @@ -1,6 +1,22 @@ /* +** Copyright (c) 2017 D. Richard Hipp +** +** This program is free software; you can redistribute it and/or +** modify it under the terms of the Simplified BSD License (also +** known as the "2-Clause License" or "FreeBSD License".) +** +** This program is distributed in the hope that it will be useful, +** but without any warranty; without even the implied warranty of +** merchantability or fitness for a particular purpose. +** +** Author contact information: +** drh@hwaci.com +** http://www.hwaci.com/drh/ +** +******************************************************************************* +** ** This file contains an implementation of SHA3 (Keccak) hashing. */ #include "config.h" #include "sha3.h" @@ -401,11 +417,11 @@ unsigned int nData ){ unsigned int i = 0; #if SHA3_BYTEORDER==1234 if( (p->nLoaded % 8)==0 && ((aData - (const unsigned char*)0)&7)==0 ){ - for(; iu.s[p->nLoaded/8] ^= *(u64*)&aData[i]; p->nLoaded += 8; if( p->nLoaded>=p->nRate ){ KeccakF1600Step(p); p->nLoaded = 0; @@ -608,22 +624,22 @@ ** Compute an SHA3 checksum of all files named on the command-line. ** If a file is named "-" then take its content from standard input. ** ** Options: ** -** --224 Compute a SHA3-224 hash (the default) -** --256 Compute a SHA3-256 hash +** --224 Compute a SHA3-224 hash +** --256 Compute a SHA3-256 hash (the default) ** --384 Compute a SHA3-384 hash ** --512 Compute a SHA3-512 hash ** --size N An N-bit hash. N must be a multiple of 32 between 128 ** and 512. */ void sha3sum_test(void){ int i; Blob in; Blob cksum; - int iSize = 224; + int iSize = 256; if( find_option("224",0,0)!=0 ) iSize = 224; else if( find_option("256",0,0)!=0 ) iSize = 256; else if( find_option("384",0,0)!=0 ) iSize = 384; else if( find_option("512",0,0)!=0 ) iSize = 512; Index: src/shun.c ================================================================== --- src/shun.c +++ src/shun.c @@ -36,11 +36,11 @@ } /* ** WEBPAGE: shun ** -** View the SHA1 hashes of all shunned artifacts. Add new hashes +** View the hashes of all shunned artifacts. Add new hashes ** to the shun set. Requires Admin privilege. 
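As a usage sketch of the length-agnostic validation now used on this page (a hypothetical helper, not part of this change; it assumes hname_validate(), canonical16() and mprintf() as they appear elsewhere in this diff):

    /* Return a canonicalized (lower-case) copy of zIn if it is a
    ** well-formed 40- or 64-character artifact hash, or NULL if not. */
    static char *canonical_hash_or_null(const char *zIn){
      int n = (int)strlen(zIn);
      char *z;
      if( !hname_validate(zIn, n) ) return 0;  /* bad length or not hex */
      z = mprintf("%s", zIn);
      canonical16(z, n);
      return z;
    }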
*/ void shun_page(void){ Stmt q; int cnt = 0; @@ -84,18 +84,18 @@ } zCanonical[j+1] = zCanonical[j] = 0; p = zCanonical; while( *p ){ int nUuid = strlen(p); - if( nUuid!=UUID_SIZE || !validate16(p, nUuid) ){ + if( !hname_validate(p, nUuid) ){ @

Error: Bad artifact IDs.

fossil_free(zCanonical); zCanonical = 0; break; }else{ - canonical16(p, UUID_SIZE); - p += UUID_SIZE+1; + canonical16(p, nUuid); + p += nUuid+1; } } zUuid = zCanonical; } style_header("Shunned Artifacts"); @@ -107,21 +107,21 @@ db_multi_exec("DELETE FROM shun WHERE uuid=%Q", p); if( !db_exists("SELECT 1 FROM blob WHERE uuid=%Q", p) ){ allExist = 0; } admin_log("Unshunned %Q", p); - p += UUID_SIZE+1; + p += strlen(p)+1; } if( allExist ){ @

Artifact(s)
- for( p = zUuid ; *p ; p += UUID_SIZE+1 ){ + for( p = zUuid ; *p ; p += strlen(p)+1 ){ @ %s(p)
} @ are no longer being shunned.

}else{ @

Artifact(s)
- for( p = zUuid ; *p ; p += UUID_SIZE+1 ){ + for( p = zUuid ; *p ; p += strlen(p)+1 ){ @ %s(p)
} @ will no longer be shunned. But they may not exist in the repository. @ It may be necessary to rebuild the repository using the @ fossil rebuild command-line before the artifact content @@ -146,14 +146,14 @@ db_multi_exec("DELETE FROM ticket WHERE tkt_uuid=%Q", p); db_multi_exec("DELETE FROM tag WHERE tagid=%d", tagid); db_multi_exec("DELETE FROM tagxref WHERE tagid=%d", tagid); } admin_log("Shunned %Q", p); - p += UUID_SIZE+1; + p += strlen(p)+1; } @

Artifact(s)
- for( p = zUuid ; *p ; p += UUID_SIZE+1 ){ + for( p = zUuid ; *p ; p += strlen(p)+1 ){ @ %s(p)
} @ have been shunned. They will no longer be pushed. @ They will be removed from the repository the next time the repository @ is rebuilt using the fossil rebuild command-line

@@ -167,18 +167,18 @@ @ artifact content will be purged from the repository the next time the @ repository is rebuilt. A list of shunned artifacts can be seen at the @ bottom of this page.

@ @ - @

To shun artifacts, enter their artifact IDs (the 40-character SHA1 - @ hash of the artifacts) in the + @

To shun artifacts, enter their artifact hashes (the 40- or + @ 64-character lowercase hexadecimal hash of the artifact content) in the @ following box and press the "Shun" button. This will cause the artifacts @ to be removed from the repository and will prevent the artifacts from being @ re-added to the repository by subsequent sync operations.
To shun artifacts, enter their artifact hashes (the 40- or + @ 64-character lowercase hexadecimal hash of the artifact content) in the @ following box and press the "Shun" button. This will cause the artifacts @ to be removed from the repository and will prevent the artifacts from being @ re-added to the repository by subsequent sync operations.

@ - @

Note that you must enter the full 40-character artifact IDs, not - @ an abbreviation or a symbolic tag.

+ @

Note that you must enter the full 40- or 64-character artifact hashes, + @ not an abbreviation or a symbolic tag.

@ @

Warning: Shunning should only be used to remove inappropriate content @ from the repository. Inappropriate content includes such things as @ spam added to Wiki, files that violate copyright or patent agreements, @ or artifacts that by design or accident interfere with the processing Index: src/sitemap.c ================================================================== --- src/sitemap.c +++ src/sitemap.c @@ -111,12 +111,11 @@ } @

  • %z(href("%R/login"))Login/Logout/Change Password
  • if( g.perm.Read ){ @
  • %z(href("%R/stat"))Repository Status @
      - @
    • %z(href("%R/hash-collisions"))Collisions on SHA1 hash - @ prefixes
    • + @
    • %z(href("%R/hash-collisions"))Collisions on hash prefixes
    • if( g.perm.Admin ){ @
    • %z(href("%R/urllist"))List of URLs used to access @ this repository
    • } @
    • %z(href("%R/bloblist"))List of Artifacts
    • Index: src/sqlcmd.c ================================================================== --- src/sqlcmd.c +++ src/sqlcmd.c @@ -185,12 +185,12 @@ ** in ways that are unrecoverable. Be sure you know what you are doing before ** running any SQL commands that modify the repository database. ** ** The following extensions to the usual SQLite commands are provided: ** -** content(X) Return the content of artifact X. X can be a -** SHA1 hash or prefix or a tag. +** content(X) Return the content of artifact X. X can be an +** artifact hash or prefix or a tag. ** ** compress(X) Compress text X. ** ** decompress(X) Decompress text X. Undoes the work of ** compress(X). Index: src/stat.c ================================================================== --- src/stat.c +++ src/stat.c @@ -76,11 +76,11 @@ style_submenu_element("URLs", "urllist"); style_submenu_element("Schema", "repo_schema"); style_submenu_element("Web-Cache", "cachestat"); } style_submenu_element("Activity Reports", "reports"); - style_submenu_element("SHA1 Collisions", "hash-collisions"); + style_submenu_element("Hash Collisions", "hash-collisions"); if( sqlite3_compileoption_used("ENABLE_DBSTAT_VTAB") ){ style_submenu_element("Table Sizes", "repo-tabsize"); } if( g.perm.Admin || g.perm.Setup || db_get_boolean("test_env_enable",0) ){ style_submenu_element("Environment", "test_env"); Index: src/tag.c ================================================================== --- src/tag.c +++ src/tag.c @@ -627,11 +627,11 @@ blob_init(&value, 0, 0); for(i=3; i3 ) blob_append(&value, " ", 1); zUuid = rid_to_uuid(pid); - blob_append(&value, zUuid, UUID_SIZE); + blob_append(&value, zUuid, strlen(zUuid)); fossil_free(zUuid); } if( bTest && !dryRun ){ tag_insert("parent", 1, blob_str(&value), -1, 0.0, rid); }else{ Index: src/tar.c ================================================================== --- src/tar.c +++ src/tar.c @@ -476,18 +476,19 @@ Manifest *pManifest; ManifestFile *pFile; Blob filename; int nPrefix; char *zName = 0; + char *zUuid; unsigned int mTime; content_get(rid, &mfile); if( blob_size(&mfile)==0 ){ blob_zero(pTar); return; } - blob_zero(&hash); + blob_set_dynamic(&hash, rid_to_uuid(rid)); blob_zero(&filename); if( zDir && zDir[0] ){ blob_appendf(&filename, "%s/", zDir); } @@ -520,13 +521,10 @@ if( eflg & (MFESTFLG_RAW|MFESTFLG_UUID) ){ if( eflg & MFESTFLG_RAW ){ blob_append(&filename, "manifest", -1); zName = blob_str(&filename); } - if( eflg & MFESTFLG_UUID ){ - sha1sum_blob(&mfile, &hash); - } if( eflg & MFESTFLG_RAW ) { sterilize_manifest(&mfile); tar_add_file(zName, &mfile, 0, mTime); } } @@ -535,11 +533,10 @@ blob_append(&hash, "\n", 1); blob_resize(&filename, nPrefix); blob_append(&filename, "manifest.uuid", -1); zName = blob_str(&filename); tar_add_file(zName, &hash, 0, mTime); - blob_reset(&hash); } if( eflg & MFESTFLG_TAGS ){ Blob tagslist; blob_zero(&tagslist); get_checkin_taglist(rid, &tagslist); @@ -564,19 +561,19 @@ tar_add_file(zName, &file, manifest_file_mperm(pFile), mTime); blob_reset(&file); } } }else{ - sha1sum_blob(&mfile, &hash); blob_append(&filename, blob_str(&hash), 16); zName = blob_str(&filename); mTime = db_int64(0, "SELECT (julianday('now') - 2440587.5)*86400.0;"); tar_begin(mTime); tar_add_file(zName, &mfile, 0, mTime); } manifest_destroy(pManifest); blob_reset(&mfile); + blob_reset(&hash); blob_reset(&filename); tar_finish(pTar); } /* Index: src/timeline.c ================================================================== --- src/timeline.c +++ src/timeline.c @@ -723,11 +723,11 @@ ** mi: 
"merge-in". An array of integer rail positions from which ** merge arrows should be drawn into this node. If the value is ** negative, then the rail position is the absolute value of mi[] ** and a thin merge-arrow descender is drawn to the bottom of ** the screen. - ** h: The SHA1 hash of the object being graphed + ** h: The artifact hash of the object being graphed */ cgi_printf("var rowinfo = [\n"); for(pRow=pGraph->pFirst; pRow; pRow=pRow->pNext){ cgi_printf("{id:%d,bg:\"%s\",r:%d,d:%d,mo:%d,mu:%d,u:%d,f:%d,au:", pRow->idx, /* id */ @@ -1461,11 +1461,11 @@ ** v Show details of files changed ** f=CHECKIN Show family (immediate parents and children) of CHECKIN ** from=CHECKIN Path from... ** to=CHECKIN ... to this ** shortest ... show only the shortest path -** uf=FILE_SHA1 Show only check-ins that contain the given file version +** uf=FILE_HASH Show only check-ins that contain the given file version ** chng=GLOBLIST Show only check-ins that involve changes to a file whose ** name matches one of the comma-separate GLOBLIST ** brbg Background color from branch name ** ubg Background color from user ** namechng Show only check-ins that have filename changes Index: src/verify.c ================================================================== --- src/verify.c +++ src/verify.c @@ -40,21 +40,19 @@ if( content_size(rid, 0)<0 ){ return; /* No way to verify phantoms */ } blob_zero(&uuid); db_blob(&uuid, "SELECT uuid FROM blob WHERE rid=%d", rid); - if( blob_size(&uuid)!=UUID_SIZE ){ + if( !hname_validate(blob_buffer(&uuid), blob_size(&uuid)) ){ fossil_fatal("not a valid rid: %d", rid); } if( content_get(rid, &content) ){ - sha1sum_blob(&content, &hash); - blob_reset(&content); - if( blob_compare(&uuid, &hash) ){ - fossil_fatal("hash of rid %d (%b) does not match its uuid (%b)", - rid, &hash, &uuid); + if( !hname_verify_hash(&content, blob_buffer(&uuid), blob_size(&uuid)) ){ + fossil_fatal("hash of rid %d does not match its uuid (%b)", + rid, &uuid); } - blob_reset(&hash); + blob_reset(&content); } blob_reset(&uuid); } /* Index: src/vfile.c ================================================================== --- src/vfile.c +++ src/vfile.c @@ -21,12 +21,12 @@ #include "vfile.h" #include #include /* -** The input is guaranteed to be a 40-character well-formed UUID. -** Find its rid. +** The input is guaranteed to be a 40- or 64-character well-formed +** artifact hash. Find its rid. */ int fast_uuid_to_rid(const char *zUuid){ static Stmt q; int rid; db_static_prepare(&q, "SELECT rid FROM blob WHERE uuid=:uuid"); @@ -51,17 +51,17 @@ ** create a phantom record. A private phantom is created for 2 and ** a public phantom is created for 1. 
*/ int uuid_to_rid(const char *zUuid, int phantomize){ int rid, sz; - char z[UUID_SIZE+1]; + char z[HNAME_MAX+1]; sz = strlen(zUuid); - if( sz!=UUID_SIZE || !validate16(zUuid, sz) ){ - return 0; + if( !hname_validate(zUuid, sz) ){ + return 0; /* Not a valid hash */ } - memcpy(z, zUuid, UUID_SIZE+1); + memcpy(z, zUuid, sz+1); canonical16(z, sz); rid = fast_uuid_to_rid(z); if( rid==0 && phantomize ){ rid = content_new(zUuid, phantomize-1); } @@ -130,11 +130,11 @@ /* ** The cksigFlags parameter to vfile_check_signature() is an OR-ed ** combination of the following bits: */ #define CKSIG_ENOTFILE 0x001 /* non-file FS objects throw an error */ -#define CKSIG_SHA1 0x002 /* Verify file content using sha1sum */ +#define CKSIG_HASH 0x002 /* Verify file content using hashing */ #define CKSIG_SETMTIME 0x004 /* Set mtime to last check-out time */ #endif /* INTERFACE */ /* @@ -159,12 +159,12 @@ ** ** If the size of the file has changed, then we always know that the file ** changed without having to look at the mtime or on-disk content. ** ** The mtime of the file is only a factor if the mtime-changes setting -** is false and the useSha1sum flag is false. If the mtime-changes -** setting is true (or undefined - it defaults to true) or if useSha1sum +** is false and the CKSIG_HASH flag is false. If the mtime-changes +** setting is true (or undefined - it defaults to true) or if CKSIG_HASH ** is true, then we do not trust the mtime and will examine the on-disk ** content to determine if a file really is the same. ** ** If the mtime is used, it is used only to determine if files are the same. ** If the mtime of a file has changed, we still examine the on-disk content @@ -172,11 +172,11 @@ */ void vfile_check_signature(int vid, unsigned int cksigFlags){ int nErr = 0; Stmt q; Blob fileCksum, origCksum; - int useMtime = (cksigFlags & CKSIG_SHA1)==0 + int useMtime = (cksigFlags & CKSIG_HASH)==0 && db_get_boolean("mtime-changes", 1); db_begin_transaction(); db_prepare(&q, "SELECT id, %Q || pathname," " vfile.mrid, deleted, chnged, uuid, size, mtime," @@ -222,40 +222,30 @@ chnged = 1; } if( origSize!=currentSize ){ if( chnged!=1 ){ /* A file size change is definitive - the file has changed. No - ** need to check the mtime or sha1sum */ + ** need to check the mtime or hash */ chnged = 1; } }else if( chnged==1 && rid!=0 && !isDeleted ){ /* File is believed to have changed but it is the same size. ** Double check that it really has changed by looking at content. 
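The change-detection rules spelled out above condense into the following sketch (illustrative only, with the locals of vfile_check_signature() assumed; the real branches appear in the hunk that follows):

    if( origSize!=currentSize ){
      chnged = 1;             /* a size change is definitive */
    }else if( chnged==1 ){
      /* same size, previously marked changed: settle it by content hash */
      if( hname_verify_file_hash(zName, zUuid, nUuid) ) chnged = 0;
    }else if( !useMtime || currentMtime!=oldMtime ){
      /* mtime is not trusted, or the mtime moved: check the on-disk
      ** content against the stored artifact hash */
      if( !hname_verify_file_hash(zName, zUuid, nUuid) ) chnged = 1;
    }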
*/ + const char *zUuid = db_column_text(&q, 5); + int nUuid = db_column_bytes(&q, 5); assert( origSize==currentSize ); - db_ephemeral_blob(&q, 5, &origCksum); - if( sha1sum_file(zName, &fileCksum) ){ - blob_zero(&fileCksum); - } - if( blob_compare(&fileCksum, &origCksum)==0 ) chnged = 0; - blob_reset(&origCksum); - blob_reset(&fileCksum); + if( hname_verify_file_hash(zName, zUuid, nUuid) ) chnged = 0; }else if( (chnged==0 || chnged==2 || chnged==4) && (useMtime==0 || currentMtime!=oldMtime) ){ /* For files that were formerly believed to be unchanged or that were ** changed by merging, if their mtime changes, or unconditionally - ** if --sha1sum is used, check to see if they have been edited by - ** looking at their SHA1 sum */ + ** if --hash is used, check to see if they have been edited by + ** looking at their artifact hashes */ + const char *zUuid = db_column_text(&q, 5); + int nUuid = db_column_bytes(&q, 5); assert( origSize==currentSize ); - db_ephemeral_blob(&q, 5, &origCksum); - if( sha1sum_file(zName, &fileCksum) ){ - blob_zero(&fileCksum); - } - if( blob_compare(&fileCksum, &origCksum) ){ - chnged = 1; - } - blob_reset(&origCksum); - blob_reset(&fileCksum); + if( !hname_verify_file_hash(zName, zUuid, nUuid) ) chnged = 1; } if( (cksigFlags & CKSIG_SETMTIME) && (chnged==0 || chnged==2 || chnged==4) ){ i64 desiredMtime; if( mtime_of_manifest_file(vid,rid,&desiredMtime)==0 ){ if( currentMtime!=desiredMtime ){ Index: src/wiki.c ================================================================== --- src/wiki.c +++ src/wiki.c @@ -1122,12 +1122,12 @@ */ int wiki_technote_to_rid(const char *zETime) { int rid=0; /* Artifact ID of the tech note */ int nETime = strlen(zETime); Stmt q; - if( nETime>=4 && nETime<=UUID_SIZE && validate16(zETime, nETime) ){ - char zUuid[UUID_SIZE+1]; + if( nETime>=4 && hname_validate(zETime, nETime) ){ + char zUuid[HNAME_MAX+1]; memcpy(zUuid, zETime, nETime+1); canonical16(zUuid, nETime); db_prepare(&q, "SELECT e.objid" " FROM event e, tag t" Index: src/wikiformat.c ================================================================== --- src/wikiformat.c +++ src/wikiformat.c @@ -1067,11 +1067,11 @@ ** If the input string corresponds to an existing baseline, ** return true. */ static int is_valid_uuid(const char *z){ int n = strlen(z); - if( n<4 || n>UUID_SIZE ) return 0; + if( n<4 || n>HNAME_MAX ) return 0; if( !validate16(z, n) ) return 0; return 1; } /* Index: src/xfer.c ================================================================== --- src/xfer.c +++ src/xfer.c @@ -54,11 +54,11 @@ time_t maxTime; /* Time when this transfer should be finished */ }; /* -** The input blob contains a UUID. Convert it into a record ID. +** The input blob contains an artifact. Convert it into a record ID. ** Create a phantom record if no prior record exists and ** phantomize is true. ** ** Compare to uuid_to_rid(). This routine takes a blob argument ** and does less error checking. @@ -100,12 +100,12 @@ ** message. This routine finishes parsing that message and does ** a record insert of the file. ** ** The file line is in one of the following two forms: ** -** file UUID SIZE \n CONTENT -** file UUID DELTASRC SIZE \n CONTENT +** file HASH SIZE \n CONTENT +** file HASH DELTASRC SIZE \n CONTENT ** ** The content is SIZE bytes immediately following the newline. ** If DELTASRC exists, then the CONTENT is a delta against the ** content of DELTASRC. 
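For illustration, a hypothetical sender-side sketch of the first form of this card, built with the blob_appendf()/blob_append() primitives used elsewhere in this diff (pOut, zHash and pContent are assumed names, not taken from this change):

    /* Emit "file HASH SIZE \n" followed by SIZE bytes of content. */
    blob_appendf(pOut, "file %s %d\n", zHash, blob_size(pContent));
    blob_append(pOut, blob_buffer(pContent), blob_size(pContent));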
** @@ -122,29 +122,30 @@ int *pnUuidList ){ int n; int rid; int srcid = 0; - Blob content, hash; + Blob content; int isPriv; + Blob *pUuid; isPriv = pXfer->nextIsPrivate; pXfer->nextIsPrivate = 0; if( pXfer->nToken<3 || pXfer->nToken>4 - || !blob_is_uuid(&pXfer->aToken[1]) + || !blob_is_hname(&pXfer->aToken[1]) || !blob_is_int(&pXfer->aToken[pXfer->nToken-1], &n) || n<0 - || (pXfer->nToken==4 && !blob_is_uuid(&pXfer->aToken[2])) + || (pXfer->nToken==4 && !blob_is_hname(&pXfer->aToken[2])) ){ blob_appendf(&pXfer->err, "malformed file line"); return; } blob_zero(&content); - blob_zero(&hash); blob_extract(pXfer->pIn, n, &content); - if( !cloneFlag && uuid_is_shunned(blob_str(&pXfer->aToken[1])) ){ + pUuid = &pXfer->aToken[1]; + if( !cloneFlag && uuid_is_shunned(blob_str(pUuid)) ){ /* Ignore files that have been shunned */ blob_reset(&content); return; } if( isPriv && !g.perm.Private ){ @@ -158,26 +159,26 @@ pXfer->nDeltaRcvd++; }else{ srcid = 0; pXfer->nFileRcvd++; } - rid = content_put_ex(&content, blob_str(&pXfer->aToken[1]), srcid, + rid = content_put_ex(&content, blob_str(pUuid), srcid, 0, isPriv); - Th_AppendToList(pzUuidList, pnUuidList, blob_str(&pXfer->aToken[1]), - blob_size(&pXfer->aToken[1])); + Th_AppendToList(pzUuidList, pnUuidList, blob_str(pUuid), + blob_size(pUuid)); remote_has(rid); blob_reset(&content); return; } if( pXfer->nToken==4 ){ Blob src, next; srcid = rid_from_uuid(&pXfer->aToken[2], 1, isPriv); if( content_get(srcid, &src)==0 ){ - rid = content_put_ex(&content, blob_str(&pXfer->aToken[1]), srcid, + rid = content_put_ex(&content, blob_str(pUuid), srcid, 0, isPriv); - Th_AppendToList(pzUuidList, pnUuidList, blob_str(&pXfer->aToken[1]), - blob_size(&pXfer->aToken[1])); + Th_AppendToList(pzUuidList, pnUuidList, blob_str(pUuid), + blob_size(pUuid)); pXfer->nDanglingFile++; db_multi_exec("DELETE FROM phantom WHERE rid=%d", rid); if( !isPriv ) content_make_public(rid); blob_reset(&src); blob_reset(&content); @@ -189,19 +190,15 @@ blob_reset(&content); content = next; }else{ pXfer->nFileRcvd++; } - sha1sum_blob(&content, &hash); - if( !blob_eq_str(&pXfer->aToken[1], blob_str(&hash), -1) ){ - blob_appendf(&pXfer->err, - "wrong hash on received artifact: expected %s but got %s", - blob_str(&pXfer->aToken[1]), blob_str(&hash)); - } - rid = content_put_ex(&content, blob_str(&hash), 0, 0, isPriv); - Th_AppendToList(pzUuidList, pnUuidList, blob_str(&hash), blob_size(&hash)); - blob_reset(&hash); + if( hname_verify_hash(&content, blob_buffer(pUuid), blob_size(pUuid))==0 ){ + blob_appendf(&pXfer->err, "wrong hash on received artifact: %b", pUuid); + } + rid = content_put_ex(&content, blob_str(pUuid), 0, 0, isPriv); + Th_AppendToList(pzUuidList, pnUuidList, blob_str(pUuid), blob_size(pUuid)); if( rid==0 ){ blob_appendf(&pXfer->err, "%s", g.zErrMsg); blob_reset(&content); }else{ if( !isPriv ) content_make_public(rid); @@ -217,18 +214,18 @@ ** a record insert of the file. The difference between "file" and ** "cfile" is that with "cfile" the content is already compressed. ** ** The file line is in one of the following two forms: ** -** cfile UUID USIZE CSIZE \n CONTENT -** cfile UUID DELTASRC USIZE CSIZE \n CONTENT +** cfile HASH USIZE CSIZE \n CONTENT +** cfile HASH DELTASRC USIZE CSIZE \n CONTENT ** ** The content is CSIZE bytes immediately following the newline. ** If DELTASRC exists, then the CONTENT is a delta against the ** content of DELTASRC. ** -** The original size of the UUID artifact is USIZE. +** The original size of the HASH artifact is USIZE. 
** ** If any error occurs, write a message into pErr which has already ** be initialized to an empty string. ** ** Any artifact successfully received by this routine is considered to @@ -248,15 +245,15 @@ isPriv = pXfer->nextIsPrivate; pXfer->nextIsPrivate = 0; if( pXfer->nToken<4 || pXfer->nToken>5 - || !blob_is_uuid(&pXfer->aToken[1]) + || !blob_is_hname(&pXfer->aToken[1]) || !blob_is_int(&pXfer->aToken[pXfer->nToken-2], &szU) || !blob_is_int(&pXfer->aToken[pXfer->nToken-1], &szC) || szC<0 || szU<0 - || (pXfer->nToken==5 && !blob_is_uuid(&pXfer->aToken[2])) + || (pXfer->nToken==5 && !blob_is_hname(&pXfer->aToken[2])) ){ blob_appendf(&pXfer->err, "malformed cfile line"); return; } if( isPriv && !g.perm.Private ){ @@ -297,11 +294,11 @@ ** ** If the 0x0001 bit of FLAGS is set, that means the file has been ** deleted, SIZE is zero, the HASH is "-", and the "\n CONTENT" is omitted. ** ** SIZE is the number of bytes of CONTENT. The CONTENT is uncompressed. -** HASH is the SHA1 hash of CONTENT. +** HASH is the artifact hash of CONTENT. ** ** If the 0x0004 bit of FLAGS is set, that means the CONTENT is omitted. ** The sender might have omitted the content because it is too big to ** transmit, or because it is unchanged and this record exists purely ** to update the MTIME. @@ -310,11 +307,10 @@ sqlite3_int64 mtime; /* The MTIME */ Blob *pHash; /* The HASH value */ int sz; /* The SIZE */ int flags; /* The FLAGS */ Blob content; /* The CONTENT */ - Blob hash; /* Hash computed from CONTENT to compare with HASH */ Blob x; /* Compressed content */ Stmt q; /* SQL statements for comparison and insert */ int isDelete; /* HASH is "-" indicating this is a delete */ int nullContent; /* True of CONTENT is NULL */ int iStatus; /* Result from unversioned_status() */ @@ -321,25 +317,23 @@ pHash = &pXfer->aToken[3]; if( pXfer->nToken==5 || !blob_is_filename(&pXfer->aToken[1]) || !blob_is_int64(&pXfer->aToken[2], &mtime) - || (!blob_eq(pHash,"-") && !blob_is_uuid(pHash)) + || (!blob_eq(pHash,"-") && !blob_is_hname(pHash)) || !blob_is_int(&pXfer->aToken[4], &sz) || !blob_is_int(&pXfer->aToken[5], &flags) ){ blob_appendf(&pXfer->err, "malformed uvfile line"); return; } blob_init(&content, 0, 0); - blob_init(&hash, 0, 0); blob_init(&x, 0, 0); if( sz>0 && (flags & 0x0005)==0 ){ blob_extract(pXfer->pIn, sz, &content); nullContent = 0; - sha1sum_blob(&content, &hash); - if( blob_compare(&hash, pHash)!=0 ){ + if( hname_verify_hash(&content, blob_buffer(pHash), blob_size(pHash))==0 ){ blob_appendf(&pXfer->err, "in uvfile line, HASH does not match CONTENT"); goto end_accept_unversioned_file; } }else{ nullContent = 1; @@ -399,11 +393,10 @@ db_unset("uv-hash", 0); end_accept_unversioned_file: blob_reset(&x); blob_reset(&content); - blob_reset(&hash); } /* ** Try to send a file as a delta against its parent. ** If successful, return the number of bytes in the delta. 
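Returning to the uvfile FLAGS bits documented a few hunks above, their interpretation on the receiving side can be sketched as follows (illustrative only; it mirrors the documented bit meanings rather than quoting the function body):

    if( flags & 0x0001 ){
      /* deleted on the sender: SIZE is zero, HASH is "-", no content */
    }else if( flags & 0x0004 ){
      /* content omitted: too large to send, or only the MTIME changed */
    }else{
      /* SIZE bytes of uncompressed content follow the newline and are
      ** checked against HASH with hname_verify_hash() */
    }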
@@ -415,11 +408,11 @@ static int send_delta_parent( Xfer *pXfer, /* The transfer context */ int rid, /* record id of the file to send */ int isPrivate, /* True if rid is a private artifact */ Blob *pContent, /* The content of the file to send */ - Blob *pUuid /* The UUID of the file to send */ + Blob *pUuid /* The HASH of the file to send */ ){ static const char *const azQuery[] = { "SELECT pid FROM plink x" " WHERE cid=%d" " AND NOT EXISTS(SELECT 1 FROM phantom WHERE rid=pid)", @@ -469,11 +462,11 @@ */ static int send_delta_native( Xfer *pXfer, /* The transfer context */ int rid, /* record id of the file to send */ int isPrivate, /* True if rid is a private artifact */ - Blob *pUuid /* The UUID of the file to send */ + Blob *pUuid /* The HASH of the file to send */ ){ Blob src, delta; int size = 0; int srcId; @@ -504,11 +497,11 @@ } /* ** Send the file identified by rid. ** -** The pUuid can be NULL in which case the correct UUID is computed +** The pUuid can be NULL in which case the correct hash is computed ** from the rid. ** ** Try to send the file as a native delta if nativeDelta is true, or ** as a parent delta if nativeDelta is false. ** @@ -728,23 +721,24 @@ } db_finalize(&q); } /* -** Compute an SHA1 hash on the tail of pMsg. Verify that it matches the +** Compute an hash on the tail of pMsg. Verify that it matches the ** the hash given in pHash. Return non-zero for an error and 0 on success. +** +** The type of hash computed (SHA1, SHA3-224, SHA3-256) is determined by +** the length of the input hash in pHash. */ static int check_tail_hash(Blob *pHash, Blob *pMsg){ Blob tail; Blob h2; int rc; blob_tail(pMsg, &tail); - sha1sum_blob(&tail, &h2); - rc = blob_compare(pHash, &h2); - blob_reset(&h2); + rc = hname_verify_hash(&tail, blob_buffer(pHash), blob_size(pHash)); blob_reset(&tail); - return rc; + return rc==HNAME_ERROR; } /* ** Check the signature on an application/x-fossil payload received by ** the HTTP server. The signature is a line of the following form: @@ -1153,10 +1147,11 @@ char *zUuidList = 0; int nUuidList = 0; char **pzUuidList = 0; int *pnUuidList = 0; int uvCatalogSent = 0; + int clientVersion = 0; /* Version number of the client */ if( fossil_strcmp(PD("REQUEST_METHOD","POST"),"POST") ){ fossil_redirect_home(); } g.zLogin = "anonymous"; @@ -1198,12 +1193,12 @@ while( blob_line(xfer.pIn, &xfer.line) ){ if( blob_buffer(&xfer.line)[0]=='#' ) continue; if( blob_size(&xfer.line)==0 ) continue; xfer.nToken = blob_tokenize(&xfer.line, xfer.aToken, count(xfer.aToken)); - /* file UUID SIZE \n CONTENT - ** file UUID DELTASRC SIZE \n CONTENT + /* file HASH SIZE \n CONTENT + ** file HASH DELTASRC SIZE \n CONTENT ** ** Accept a file from the client. */ if( blob_eq(&xfer.aToken[0], "file") ){ if( !isPush ){ @@ -1219,12 +1214,12 @@ nErr++; break; } }else - /* cfile UUID USIZE CSIZE \n CONTENT - ** cfile UUID DELTASRC USIZE CSIZE \n CONTENT + /* cfile HASH USIZE CSIZE \n CONTENT + ** cfile HASH DELTASRC USIZE CSIZE \n CONTENT ** ** Accept a file from the client. */ if( blob_eq(&xfer.aToken[0], "cfile") ){ if( !isPush ){ @@ -1254,17 +1249,17 @@ nErr++; break; } }else - /* gimme UUID + /* gimme HASH ** ** Client is requesting a file. Send it. 
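Returning to check_tail_hash() above: because hname_verify_hash() picks the algorithm from the length of the hash text, one call now covers both a 40-character SHA1 value and a 64-character SHA3-256 value. A hypothetical caller-side sketch (the surrounding card parsing is assumed, not taken from this diff):

    /* pHash may hold either a 40-character (SHA1) or a 64-character
    ** (SHA3-256) hex value; pMsg is the payload whose tail is checked. */
    if( check_tail_hash(pHash, pMsg) ){
      /* non-zero return: the tail does not hash to the supplied value */
    }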
*/ if( blob_eq(&xfer.aToken[0], "gimme") && xfer.nToken==2 - && blob_is_uuid(&xfer.aToken[1]) + && blob_is_hname(&xfer.aToken[1]) ){ nGimme++; if( isPull ){ int rid = rid_from_uuid(&xfer.aToken[1], 0, 0); if( rid ){ @@ -1282,18 +1277,18 @@ && blob_is_filename(&xfer.aToken[1]) ){ send_unversioned_file(&xfer, blob_str(&xfer.aToken[1]), 0); }else - /* igot UUID ?ISPRIVATE? + /* igot HASH ?ISPRIVATE? ** ** Client announces that it has a particular file. If the ISPRIVATE ** argument exists and is non-zero, then the file is a private file. */ if( xfer.nToken>=2 && blob_eq(&xfer.aToken[0], "igot") - && blob_is_uuid(&xfer.aToken[1]) + && blob_is_hname(&xfer.aToken[1]) ){ if( isPush ){ if( xfer.nToken==2 || blob_eq(&xfer.aToken[2],"1")==0 ){ rid_from_uuid(&xfer.aToken[1], 1, 0); }else if( g.perm.Private ){ @@ -1307,16 +1302,15 @@ /* pull SERVERCODE PROJECTCODE ** push SERVERCODE PROJECTCODE ** ** The client wants either send or receive. The server should - ** verify that the project code matches. + ** verify that the project code matches. The server code is ignored. */ if( xfer.nToken==3 && (blob_eq(&xfer.aToken[0], "pull") || blob_eq(&xfer.aToken[0], "push")) - && blob_is_uuid(&xfer.aToken[1]) - && blob_is_uuid(&xfer.aToken[2]) + && blob_is_hname(&xfer.aToken[2]) ){ const char *zPCode; zPCode = db_get("project-code", 0); if( zPCode==0 ){ fossil_panic("missing project code"); @@ -1534,19 +1528,27 @@ ** Send igot cards for all known artifacts. */ if( blob_eq(&xfer.aToken[1], "send-catalog") ){ xfer.resync = 0x7fffffff; } + + /* pragma client-version VERSION + ** + ** Let the server know what version of Fossil is running on the client. + */ + if( xfer.nToken>=3 && blob_eq(&xfer.aToken[1], "client-version") ){ + clientVersion = atoi(blob_str(&xfer.aToken[2])); + } /* pragma uv-hash HASH ** ** The client wants to make sure that unversioned files are all synced. ** If the HASH does not match, send a complete catalog of ** "uvigot" cards. */ if( blob_eq(&xfer.aToken[1], "uv-hash") - && blob_is_uuid(&xfer.aToken[2]) + && blob_is_hname(&xfer.aToken[2]) ){ if( !uvCatalogSent ){ if( g.perm.Read && g.perm.WrUnver ){ @ pragma uv-push-ok send_unversioned_catalog(&xfer); @@ -1783,10 +1785,11 @@ } /* ** Always begin with a clone, pull, or push message */ + blob_appendf(&send, "pragma client-version %d\n", RELEASE_VERSION_NUMBER); if( syncFlags & SYNC_CLONE ){ blob_appendf(&send, "clone 3 %d\n", cloneSeqno); syncFlags &= ~(SYNC_PUSH|SYNC_PULL); nCardSent++; /* TBD: Request all transferable configuration values */ @@ -1819,10 +1822,11 @@ db_record_repository_filename(0); db_multi_exec( "CREATE TEMP TABLE onremote(rid INTEGER PRIMARY KEY);" ); manifest_crosslink_begin(); + /* Send back the most recently received cookie. Let the server ** figure out if this is a cookie that it cares about. */ zCookie = db_get("cookie", 0); @@ -2028,22 +2032,22 @@ lastPctDone = pctDone; fflush(stdout); } } - /* file UUID SIZE \n CONTENT - ** file UUID DELTASRC SIZE \n CONTENT + /* file HASH SIZE \n CONTENT + ** file HASH DELTASRC SIZE \n CONTENT ** ** Receive a file transmitted from the server. */ if( blob_eq(&xfer.aToken[0],"file") ){ xfer_accept_file(&xfer, (syncFlags & SYNC_CLONE)!=0, 0, 0); nArtifactRcvd++; }else - /* cfile UUID USIZE CSIZE \n CONTENT - ** cfile UUID DELTASRC USIZE CSIZE \n CONTENT + /* cfile HASH USIZE CSIZE \n CONTENT + ** cfile HASH DELTASRC USIZE CSIZE \n CONTENT ** ** Receive a compressed file transmitted from the server. 
*/ if( blob_eq(&xfer.aToken[0],"cfile") ){ xfer_accept_compressed_file(&xfer, 0, 0); @@ -2062,27 +2066,27 @@ fossil_print("\rUnversioned-file received: %s\n", blob_str(&xfer.aToken[1])); } }else - /* gimme UUID + /* gimme HASH ** ** Server is requesting a file. If the file is a manifest, assume ** that the server will also want to know all of the content files ** associated with the manifest and send those too. */ if( blob_eq(&xfer.aToken[0], "gimme") && xfer.nToken==2 - && blob_is_uuid(&xfer.aToken[1]) + && blob_is_hname(&xfer.aToken[1]) ){ if( syncFlags & SYNC_PUSH ){ int rid = rid_from_uuid(&xfer.aToken[1], 0, 0); if( rid ) send_file(&xfer, rid, &xfer.aToken[1], 0); } }else - /* igot UUID ?PRIVATEFLAG? + /* igot HASH ?PRIVATEFLAG? ** ** Server announces that it has a particular file. If this is ** not a file that we have and we are pulling, then create a ** phantom to cause this file to be requested on the next cycle. ** Always remember that the server has this file so that we do @@ -2092,11 +2096,11 @@ ** private. Pretend it does not exists if we are not pulling ** private files. */ if( xfer.nToken>=2 && blob_eq(&xfer.aToken[0], "igot") - && blob_is_uuid(&xfer.aToken[1]) + && blob_is_hname(&xfer.aToken[1]) ){ int rid; int isPriv = xfer.nToken>=3 && blob_eq(&xfer.aToken[2],"1"); rid = rid_from_uuid(&xfer.aToken[1], 0, 0); if( rid>0 ){ @@ -2126,11 +2130,11 @@ if( xfer.nToken==5 && blob_eq(&xfer.aToken[0], "uvigot") && blob_is_filename(&xfer.aToken[1]) && blob_is_int64(&xfer.aToken[2], &mtime) && blob_is_int(&xfer.aToken[4], &size) - && (blob_eq(&xfer.aToken[3],"-") || blob_is_uuid(&xfer.aToken[3])) + && (blob_eq(&xfer.aToken[3],"-") || blob_is_hname(&xfer.aToken[3])) ){ const char *zName = blob_str(&xfer.aToken[1]); const char *zHash = blob_str(&xfer.aToken[3]); int iStatus; iStatus = unversioned_status(zName, mtime, zHash); @@ -2188,11 +2192,11 @@ ** the client what product to use for the new database. 
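Putting the clone pieces together (a sketch composed from the client-side lines shown earlier in this diff; the values are whatever RELEASE_VERSION_NUMBER and cloneSeqno hold at run time): the request now opens with the version pragma and the clone card, and the server's reply carries the project code in the push card handled just below.

    blob_appendf(&send, "pragma client-version %d\n", RELEASE_VERSION_NUMBER);
    if( syncFlags & SYNC_CLONE ){
      blob_appendf(&send, "clone 3 %d\n", cloneSeqno);
    }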
*/ if( blob_eq(&xfer.aToken[0],"push") && xfer.nToken==3 && (syncFlags & SYNC_CLONE)!=0 - && blob_is_uuid(&xfer.aToken[2]) + && blob_is_hname(&xfer.aToken[2]) ){ if( zPCode==0 ){ zPCode = mprintf("%b", &xfer.aToken[2]); db_set("project-code", zPCode, 0); } Index: src/zip.c ================================================================== --- src/zip.c +++ src/zip.c @@ -339,11 +339,11 @@ content_get(rid, &mfile); if( blob_size(&mfile)==0 ){ blob_zero(pZip); return; } - blob_zero(&hash); + blob_set_dynamic(&hash, rid_to_uuid(rid)); blob_zero(&filename); zip_open(); if( zDir && zDir[0] ){ blob_appendf(&filename, "%s/", zDir); @@ -378,13 +378,10 @@ if( eflg & MFESTFLG_RAW ){ blob_append(&filename, "manifest", -1); zName = blob_str(&filename); zip_add_folders(zName); } - if( eflg & MFESTFLG_UUID ){ - sha1sum_blob(&mfile, &hash); - } if( eflg & MFESTFLG_RAW ){ sterilize_manifest(&mfile); zip_add_file(zName, &mfile, 0); } } @@ -394,11 +391,10 @@ blob_resize(&filename, nPrefix); blob_append(&filename, "manifest.uuid", -1); zName = blob_str(&filename); zip_add_folders(zName); zip_add_file(zName, &hash, 0); - blob_reset(&hash); } if( eflg & MFESTFLG_TAGS ){ Blob tagslist; blob_zero(&tagslist); get_checkin_taglist(rid, &tagslist); @@ -429,10 +425,11 @@ }else{ blob_reset(&mfile); } manifest_destroy(pManifest); blob_reset(&filename); + blob_reset(&hash); zip_close(pZip); } /* ** COMMAND: zip* Index: win/Makefile.dmc ================================================================== --- win/Makefile.dmc +++ win/Makefile.dmc @@ -28,13 +28,13 @@ SQLITE_OPTIONS = -DNDEBUG=1 -DSQLITE_THREADSAFE=0 -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_DEFAULT_WAL_SYNCHRONOUS=1 -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DSQLITE_OMIT_DECLTYPE -DSQLITE_OMIT_DEPRECATED -DSQLITE_OMIT_PROGRESS_CALLBACK -DSQLITE_OMIT_SHARED_CACHE -DSQLITE_OMIT_LOAD_EXTENSION -DSQLITE_MAX_EXPR_DEPTH=0 -DSQLITE_USE_ALLOCA -DSQLITE_ENABLE_LOCKING_STYLE=0 -DSQLITE_DEFAULT_FILE_FORMAT=4 -DSQLITE_ENABLE_EXPLAIN_COMMENTS -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_DBSTAT_VTAB -DSQLITE_ENABLE_JSON1 -DSQLITE_ENABLE_FTS5 SHELL_OPTIONS = -Dmain=sqlite3_shell -DSQLITE_SHELL_IS_UTF8=1 -DSQLITE_OMIT_LOAD_EXTENSION=1 -DUSE_SYSTEM_SQLITE=$(USE_SYSTEM_SQLITE) -DSQLITE_SHELL_DBNAME_PROC=fossil_open -Daccess=file_access -Dsystem=fossil_system -Dgetenv=fossil_getenv -Dfopen=fossil_fopen -SRC = add_.c allrepo_.c attach_.c bag_.c bisect_.c blob_.c branch_.c browse_.c builtin_.c bundle_.c cache_.c captcha_.c cgi_.c checkin_.c checkout_.c clearsign_.c clone_.c comformat_.c configure_.c content_.c db_.c delta_.c deltacmd_.c descendants_.c diff_.c diffcmd_.c dispatch_.c doc_.c encode_.c event_.c export_.c file_.c finfo_.c foci_.c fshell_.c fusefs_.c glob_.c graph_.c gzip_.c http_.c http_socket_.c http_ssl_.c http_transport_.c import_.c info_.c json_.c json_artifact_.c json_branch_.c json_config_.c json_diff_.c json_dir_.c json_finfo_.c json_login_.c json_query_.c json_report_.c json_status_.c json_tag_.c json_timeline_.c json_user_.c json_wiki_.c leaf_.c loadctrl_.c login_.c lookslike_.c main_.c manifest_.c markdown_.c markdown_html_.c md5_.c merge_.c merge3_.c moderate_.c name_.c path_.c piechart_.c pivot_.c popen_.c pqueue_.c printf_.c publish_.c purge_.c rebuild_.c regexp_.c report_.c rss_.c schema_.c search_.c setup_.c sha1_.c sha3_.c shun_.c sitemap_.c skins_.c sqlcmd_.c stash_.c stat_.c statrep_.c style_.c sync_.c tag_.c tar_.c th_main_.c timeline_.c tkt_.c tktsetup_.c undo_.c unicode_.c unversioned_.c update_.c url_.c user_.c utf8_.c util_.c 
verify_.c vfile_.c wiki_.c wikiformat_.c winfile_.c winhttp_.c wysiwyg_.c xfer_.c xfersetup_.c zip_.c +SRC = add_.c allrepo_.c attach_.c bag_.c bisect_.c blob_.c branch_.c browse_.c builtin_.c bundle_.c cache_.c captcha_.c cgi_.c checkin_.c checkout_.c clearsign_.c clone_.c comformat_.c configure_.c content_.c db_.c delta_.c deltacmd_.c descendants_.c diff_.c diffcmd_.c dispatch_.c doc_.c encode_.c event_.c export_.c file_.c finfo_.c foci_.c fshell_.c fusefs_.c glob_.c graph_.c gzip_.c hname_.c http_.c http_socket_.c http_ssl_.c http_transport_.c import_.c info_.c json_.c json_artifact_.c json_branch_.c json_config_.c json_diff_.c json_dir_.c json_finfo_.c json_login_.c json_query_.c json_report_.c json_status_.c json_tag_.c json_timeline_.c json_user_.c json_wiki_.c leaf_.c loadctrl_.c login_.c lookslike_.c main_.c manifest_.c markdown_.c markdown_html_.c md5_.c merge_.c merge3_.c moderate_.c name_.c path_.c piechart_.c pivot_.c popen_.c pqueue_.c printf_.c publish_.c purge_.c rebuild_.c regexp_.c report_.c rss_.c schema_.c search_.c setup_.c sha1_.c sha3_.c shun_.c sitemap_.c skins_.c sqlcmd_.c stash_.c stat_.c statrep_.c style_.c sync_.c tag_.c tar_.c th_main_.c timeline_.c tkt_.c tktsetup_.c undo_.c unicode_.c unversioned_.c update_.c url_.c user_.c utf8_.c util_.c verify_.c vfile_.c wiki_.c wikiformat_.c winfile_.c winhttp_.c wysiwyg_.c xfer_.c xfersetup_.c zip_.c -OBJ = $(OBJDIR)\add$O $(OBJDIR)\allrepo$O $(OBJDIR)\attach$O $(OBJDIR)\bag$O $(OBJDIR)\bisect$O $(OBJDIR)\blob$O $(OBJDIR)\branch$O $(OBJDIR)\browse$O $(OBJDIR)\builtin$O $(OBJDIR)\bundle$O $(OBJDIR)\cache$O $(OBJDIR)\captcha$O $(OBJDIR)\cgi$O $(OBJDIR)\checkin$O $(OBJDIR)\checkout$O $(OBJDIR)\clearsign$O $(OBJDIR)\clone$O $(OBJDIR)\comformat$O $(OBJDIR)\configure$O $(OBJDIR)\content$O $(OBJDIR)\db$O $(OBJDIR)\delta$O $(OBJDIR)\deltacmd$O $(OBJDIR)\descendants$O $(OBJDIR)\diff$O $(OBJDIR)\diffcmd$O $(OBJDIR)\dispatch$O $(OBJDIR)\doc$O $(OBJDIR)\encode$O $(OBJDIR)\event$O $(OBJDIR)\export$O $(OBJDIR)\file$O $(OBJDIR)\finfo$O $(OBJDIR)\foci$O $(OBJDIR)\fshell$O $(OBJDIR)\fusefs$O $(OBJDIR)\glob$O $(OBJDIR)\graph$O $(OBJDIR)\gzip$O $(OBJDIR)\http$O $(OBJDIR)\http_socket$O $(OBJDIR)\http_ssl$O $(OBJDIR)\http_transport$O $(OBJDIR)\import$O $(OBJDIR)\info$O $(OBJDIR)\json$O $(OBJDIR)\json_artifact$O $(OBJDIR)\json_branch$O $(OBJDIR)\json_config$O $(OBJDIR)\json_diff$O $(OBJDIR)\json_dir$O $(OBJDIR)\json_finfo$O $(OBJDIR)\json_login$O $(OBJDIR)\json_query$O $(OBJDIR)\json_report$O $(OBJDIR)\json_status$O $(OBJDIR)\json_tag$O $(OBJDIR)\json_timeline$O $(OBJDIR)\json_user$O $(OBJDIR)\json_wiki$O $(OBJDIR)\leaf$O $(OBJDIR)\loadctrl$O $(OBJDIR)\login$O $(OBJDIR)\lookslike$O $(OBJDIR)\main$O $(OBJDIR)\manifest$O $(OBJDIR)\markdown$O $(OBJDIR)\markdown_html$O $(OBJDIR)\md5$O $(OBJDIR)\merge$O $(OBJDIR)\merge3$O $(OBJDIR)\moderate$O $(OBJDIR)\name$O $(OBJDIR)\path$O $(OBJDIR)\piechart$O $(OBJDIR)\pivot$O $(OBJDIR)\popen$O $(OBJDIR)\pqueue$O $(OBJDIR)\printf$O $(OBJDIR)\publish$O $(OBJDIR)\purge$O $(OBJDIR)\rebuild$O $(OBJDIR)\regexp$O $(OBJDIR)\report$O $(OBJDIR)\rss$O $(OBJDIR)\schema$O $(OBJDIR)\search$O $(OBJDIR)\setup$O $(OBJDIR)\sha1$O $(OBJDIR)\sha3$O $(OBJDIR)\shun$O $(OBJDIR)\sitemap$O $(OBJDIR)\skins$O $(OBJDIR)\sqlcmd$O $(OBJDIR)\stash$O $(OBJDIR)\stat$O $(OBJDIR)\statrep$O $(OBJDIR)\style$O $(OBJDIR)\sync$O $(OBJDIR)\tag$O $(OBJDIR)\tar$O $(OBJDIR)\th_main$O $(OBJDIR)\timeline$O $(OBJDIR)\tkt$O $(OBJDIR)\tktsetup$O $(OBJDIR)\undo$O $(OBJDIR)\unicode$O $(OBJDIR)\unversioned$O $(OBJDIR)\update$O $(OBJDIR)\url$O $(OBJDIR)\user$O 
$(OBJDIR)\utf8$O $(OBJDIR)\util$O $(OBJDIR)\verify$O $(OBJDIR)\vfile$O $(OBJDIR)\wiki$O $(OBJDIR)\wikiformat$O $(OBJDIR)\winfile$O $(OBJDIR)\winhttp$O $(OBJDIR)\wysiwyg$O $(OBJDIR)\xfer$O $(OBJDIR)\xfersetup$O $(OBJDIR)\zip$O $(OBJDIR)\shell$O $(OBJDIR)\sqlite3$O $(OBJDIR)\th$O $(OBJDIR)\th_lang$O +OBJ = $(OBJDIR)\add$O $(OBJDIR)\allrepo$O $(OBJDIR)\attach$O $(OBJDIR)\bag$O $(OBJDIR)\bisect$O $(OBJDIR)\blob$O $(OBJDIR)\branch$O $(OBJDIR)\browse$O $(OBJDIR)\builtin$O $(OBJDIR)\bundle$O $(OBJDIR)\cache$O $(OBJDIR)\captcha$O $(OBJDIR)\cgi$O $(OBJDIR)\checkin$O $(OBJDIR)\checkout$O $(OBJDIR)\clearsign$O $(OBJDIR)\clone$O $(OBJDIR)\comformat$O $(OBJDIR)\configure$O $(OBJDIR)\content$O $(OBJDIR)\db$O $(OBJDIR)\delta$O $(OBJDIR)\deltacmd$O $(OBJDIR)\descendants$O $(OBJDIR)\diff$O $(OBJDIR)\diffcmd$O $(OBJDIR)\dispatch$O $(OBJDIR)\doc$O $(OBJDIR)\encode$O $(OBJDIR)\event$O $(OBJDIR)\export$O $(OBJDIR)\file$O $(OBJDIR)\finfo$O $(OBJDIR)\foci$O $(OBJDIR)\fshell$O $(OBJDIR)\fusefs$O $(OBJDIR)\glob$O $(OBJDIR)\graph$O $(OBJDIR)\gzip$O $(OBJDIR)\hname$O $(OBJDIR)\http$O $(OBJDIR)\http_socket$O $(OBJDIR)\http_ssl$O $(OBJDIR)\http_transport$O $(OBJDIR)\import$O $(OBJDIR)\info$O $(OBJDIR)\json$O $(OBJDIR)\json_artifact$O $(OBJDIR)\json_branch$O $(OBJDIR)\json_config$O $(OBJDIR)\json_diff$O $(OBJDIR)\json_dir$O $(OBJDIR)\json_finfo$O $(OBJDIR)\json_login$O $(OBJDIR)\json_query$O $(OBJDIR)\json_report$O $(OBJDIR)\json_status$O $(OBJDIR)\json_tag$O $(OBJDIR)\json_timeline$O $(OBJDIR)\json_user$O $(OBJDIR)\json_wiki$O $(OBJDIR)\leaf$O $(OBJDIR)\loadctrl$O $(OBJDIR)\login$O $(OBJDIR)\lookslike$O $(OBJDIR)\main$O $(OBJDIR)\manifest$O $(OBJDIR)\markdown$O $(OBJDIR)\markdown_html$O $(OBJDIR)\md5$O $(OBJDIR)\merge$O $(OBJDIR)\merge3$O $(OBJDIR)\moderate$O $(OBJDIR)\name$O $(OBJDIR)\path$O $(OBJDIR)\piechart$O $(OBJDIR)\pivot$O $(OBJDIR)\popen$O $(OBJDIR)\pqueue$O $(OBJDIR)\printf$O $(OBJDIR)\publish$O $(OBJDIR)\purge$O $(OBJDIR)\rebuild$O $(OBJDIR)\regexp$O $(OBJDIR)\report$O $(OBJDIR)\rss$O $(OBJDIR)\schema$O $(OBJDIR)\search$O $(OBJDIR)\setup$O $(OBJDIR)\sha1$O $(OBJDIR)\sha3$O $(OBJDIR)\shun$O $(OBJDIR)\sitemap$O $(OBJDIR)\skins$O $(OBJDIR)\sqlcmd$O $(OBJDIR)\stash$O $(OBJDIR)\stat$O $(OBJDIR)\statrep$O $(OBJDIR)\style$O $(OBJDIR)\sync$O $(OBJDIR)\tag$O $(OBJDIR)\tar$O $(OBJDIR)\th_main$O $(OBJDIR)\timeline$O $(OBJDIR)\tkt$O $(OBJDIR)\tktsetup$O $(OBJDIR)\undo$O $(OBJDIR)\unicode$O $(OBJDIR)\unversioned$O $(OBJDIR)\update$O $(OBJDIR)\url$O $(OBJDIR)\user$O $(OBJDIR)\utf8$O $(OBJDIR)\util$O $(OBJDIR)\verify$O $(OBJDIR)\vfile$O $(OBJDIR)\wiki$O $(OBJDIR)\wikiformat$O $(OBJDIR)\winfile$O $(OBJDIR)\winhttp$O $(OBJDIR)\wysiwyg$O $(OBJDIR)\xfer$O $(OBJDIR)\xfersetup$O $(OBJDIR)\zip$O $(OBJDIR)\shell$O $(OBJDIR)\sqlite3$O $(OBJDIR)\th$O $(OBJDIR)\th_lang$O RC=$(DMDIR)\bin\rcc RCFLAGS=-32 -w1 -I$(SRCDIR) /D__DMC__ @@ -49,11 +49,11 @@ $(OBJDIR)\fossil.res: $B\win\fossil.rc $(RC) $(RCFLAGS) -o$@ $** $(OBJDIR)\link: $B\win\Makefile.dmc $(OBJDIR)\fossil.res - +echo add allrepo attach bag bisect blob branch browse builtin bundle cache captcha cgi checkin checkout clearsign clone comformat configure content db delta deltacmd descendants diff diffcmd dispatch doc encode event export file finfo foci fshell fusefs glob graph gzip http http_socket http_ssl http_transport import info json json_artifact json_branch json_config json_diff json_dir json_finfo json_login json_query json_report json_status json_tag json_timeline json_user json_wiki leaf loadctrl login lookslike main manifest markdown markdown_html md5 merge merge3 
moderate name path piechart pivot popen pqueue printf publish purge rebuild regexp report rss schema search setup sha1 sha3 shun sitemap skins sqlcmd stash stat statrep style sync tag tar th_main timeline tkt tktsetup undo unicode unversioned update url user utf8 util verify vfile wiki wikiformat winfile winhttp wysiwyg xfer xfersetup zip shell sqlite3 th th_lang > $@ + +echo add allrepo attach bag bisect blob branch browse builtin bundle cache captcha cgi checkin checkout clearsign clone comformat configure content db delta deltacmd descendants diff diffcmd dispatch doc encode event export file finfo foci fshell fusefs glob graph gzip hname http http_socket http_ssl http_transport import info json json_artifact json_branch json_config json_diff json_dir json_finfo json_login json_query json_report json_status json_tag json_timeline json_user json_wiki leaf loadctrl login lookslike main manifest markdown markdown_html md5 merge merge3 moderate name path piechart pivot popen pqueue printf publish purge rebuild regexp report rss schema search setup sha1 sha3 shun sitemap skins sqlcmd stash stat statrep style sync tag tar th_main timeline tkt tktsetup undo unicode unversioned update url user utf8 util verify vfile wiki wikiformat winfile winhttp wysiwyg xfer xfersetup zip shell sqlite3 th th_lang > $@ +echo fossil >> $@ +echo fossil >> $@ +echo $(LIBS) >> $@ +echo. >> $@ +echo fossil >> $@ @@ -356,10 +356,16 @@ $(OBJDIR)\gzip$O : gzip_.c gzip.h $(TCC) -o$@ -c gzip_.c gzip_.c : $(SRCDIR)\gzip.c +translate$E $** > $@ + +$(OBJDIR)\hname$O : hname_.c hname.h + $(TCC) -o$@ -c hname_.c + +hname_.c : $(SRCDIR)\hname.c + +translate$E $** > $@ $(OBJDIR)\http$O : http_.c http.h $(TCC) -o$@ -c http_.c http_.c : $(SRCDIR)\http.c @@ -862,7 +868,7 @@ zip_.c : $(SRCDIR)\zip.c +translate$E $** > $@ headers: makeheaders$E page_index.h builtin_data.h VERSION.h - +makeheaders$E add_.c:add.h allrepo_.c:allrepo.h attach_.c:attach.h bag_.c:bag.h bisect_.c:bisect.h blob_.c:blob.h branch_.c:branch.h browse_.c:browse.h builtin_.c:builtin.h bundle_.c:bundle.h cache_.c:cache.h captcha_.c:captcha.h cgi_.c:cgi.h checkin_.c:checkin.h checkout_.c:checkout.h clearsign_.c:clearsign.h clone_.c:clone.h comformat_.c:comformat.h configure_.c:configure.h content_.c:content.h db_.c:db.h delta_.c:delta.h deltacmd_.c:deltacmd.h descendants_.c:descendants.h diff_.c:diff.h diffcmd_.c:diffcmd.h dispatch_.c:dispatch.h doc_.c:doc.h encode_.c:encode.h event_.c:event.h export_.c:export.h file_.c:file.h finfo_.c:finfo.h foci_.c:foci.h fshell_.c:fshell.h fusefs_.c:fusefs.h glob_.c:glob.h graph_.c:graph.h gzip_.c:gzip.h http_.c:http.h http_socket_.c:http_socket.h http_ssl_.c:http_ssl.h http_transport_.c:http_transport.h import_.c:import.h info_.c:info.h json_.c:json.h json_artifact_.c:json_artifact.h json_branch_.c:json_branch.h json_config_.c:json_config.h json_diff_.c:json_diff.h json_dir_.c:json_dir.h json_finfo_.c:json_finfo.h json_login_.c:json_login.h json_query_.c:json_query.h json_report_.c:json_report.h json_status_.c:json_status.h json_tag_.c:json_tag.h json_timeline_.c:json_timeline.h json_user_.c:json_user.h json_wiki_.c:json_wiki.h leaf_.c:leaf.h loadctrl_.c:loadctrl.h login_.c:login.h lookslike_.c:lookslike.h main_.c:main.h manifest_.c:manifest.h markdown_.c:markdown.h markdown_html_.c:markdown_html.h md5_.c:md5.h merge_.c:merge.h merge3_.c:merge3.h moderate_.c:moderate.h name_.c:name.h path_.c:path.h piechart_.c:piechart.h pivot_.c:pivot.h popen_.c:popen.h pqueue_.c:pqueue.h printf_.c:printf.h publish_.c:publish.h 
purge_.c:purge.h rebuild_.c:rebuild.h regexp_.c:regexp.h report_.c:report.h rss_.c:rss.h schema_.c:schema.h search_.c:search.h setup_.c:setup.h sha1_.c:sha1.h sha3_.c:sha3.h shun_.c:shun.h sitemap_.c:sitemap.h skins_.c:skins.h sqlcmd_.c:sqlcmd.h stash_.c:stash.h stat_.c:stat.h statrep_.c:statrep.h style_.c:style.h sync_.c:sync.h tag_.c:tag.h tar_.c:tar.h th_main_.c:th_main.h timeline_.c:timeline.h tkt_.c:tkt.h tktsetup_.c:tktsetup.h undo_.c:undo.h unicode_.c:unicode.h unversioned_.c:unversioned.h update_.c:update.h url_.c:url.h user_.c:user.h utf8_.c:utf8.h util_.c:util.h verify_.c:verify.h vfile_.c:vfile.h wiki_.c:wiki.h wikiformat_.c:wikiformat.h winfile_.c:winfile.h winhttp_.c:winhttp.h wysiwyg_.c:wysiwyg.h xfer_.c:xfer.h xfersetup_.c:xfersetup.h zip_.c:zip.h $(SRCDIR)\sqlite3.h $(SRCDIR)\th.h VERSION.h $(SRCDIR)\cson_amalgamation.h + +makeheaders$E add_.c:add.h allrepo_.c:allrepo.h attach_.c:attach.h bag_.c:bag.h bisect_.c:bisect.h blob_.c:blob.h branch_.c:branch.h browse_.c:browse.h builtin_.c:builtin.h bundle_.c:bundle.h cache_.c:cache.h captcha_.c:captcha.h cgi_.c:cgi.h checkin_.c:checkin.h checkout_.c:checkout.h clearsign_.c:clearsign.h clone_.c:clone.h comformat_.c:comformat.h configure_.c:configure.h content_.c:content.h db_.c:db.h delta_.c:delta.h deltacmd_.c:deltacmd.h descendants_.c:descendants.h diff_.c:diff.h diffcmd_.c:diffcmd.h dispatch_.c:dispatch.h doc_.c:doc.h encode_.c:encode.h event_.c:event.h export_.c:export.h file_.c:file.h finfo_.c:finfo.h foci_.c:foci.h fshell_.c:fshell.h fusefs_.c:fusefs.h glob_.c:glob.h graph_.c:graph.h gzip_.c:gzip.h hname_.c:hname.h http_.c:http.h http_socket_.c:http_socket.h http_ssl_.c:http_ssl.h http_transport_.c:http_transport.h import_.c:import.h info_.c:info.h json_.c:json.h json_artifact_.c:json_artifact.h json_branch_.c:json_branch.h json_config_.c:json_config.h json_diff_.c:json_diff.h json_dir_.c:json_dir.h json_finfo_.c:json_finfo.h json_login_.c:json_login.h json_query_.c:json_query.h json_report_.c:json_report.h json_status_.c:json_status.h json_tag_.c:json_tag.h json_timeline_.c:json_timeline.h json_user_.c:json_user.h json_wiki_.c:json_wiki.h leaf_.c:leaf.h loadctrl_.c:loadctrl.h login_.c:login.h lookslike_.c:lookslike.h main_.c:main.h manifest_.c:manifest.h markdown_.c:markdown.h markdown_html_.c:markdown_html.h md5_.c:md5.h merge_.c:merge.h merge3_.c:merge3.h moderate_.c:moderate.h name_.c:name.h path_.c:path.h piechart_.c:piechart.h pivot_.c:pivot.h popen_.c:popen.h pqueue_.c:pqueue.h printf_.c:printf.h publish_.c:publish.h purge_.c:purge.h rebuild_.c:rebuild.h regexp_.c:regexp.h report_.c:report.h rss_.c:rss.h schema_.c:schema.h search_.c:search.h setup_.c:setup.h sha1_.c:sha1.h sha3_.c:sha3.h shun_.c:shun.h sitemap_.c:sitemap.h skins_.c:skins.h sqlcmd_.c:sqlcmd.h stash_.c:stash.h stat_.c:stat.h statrep_.c:statrep.h style_.c:style.h sync_.c:sync.h tag_.c:tag.h tar_.c:tar.h th_main_.c:th_main.h timeline_.c:timeline.h tkt_.c:tkt.h tktsetup_.c:tktsetup.h undo_.c:undo.h unicode_.c:unicode.h unversioned_.c:unversioned.h update_.c:update.h url_.c:url.h user_.c:user.h utf8_.c:utf8.h util_.c:util.h verify_.c:verify.h vfile_.c:vfile.h wiki_.c:wiki.h wikiformat_.c:wikiformat.h winfile_.c:winfile.h winhttp_.c:winhttp.h wysiwyg_.c:wysiwyg.h xfer_.c:xfer.h xfersetup_.c:xfersetup.h zip_.c:zip.h $(SRCDIR)\sqlite3.h $(SRCDIR)\th.h VERSION.h $(SRCDIR)\cson_amalgamation.h @copy /Y nul: headers Index: win/Makefile.mingw ================================================================== --- win/Makefile.mingw +++ win/Makefile.mingw @@ -461,10 
+461,11 @@ $(SRCDIR)/fshell.c \ $(SRCDIR)/fusefs.c \ $(SRCDIR)/glob.c \ $(SRCDIR)/graph.c \ $(SRCDIR)/gzip.c \ + $(SRCDIR)/hname.c \ $(SRCDIR)/http.c \ $(SRCDIR)/http_socket.c \ $(SRCDIR)/http_ssl.c \ $(SRCDIR)/http_transport.c \ $(SRCDIR)/import.c \ @@ -637,10 +638,11 @@ $(OBJDIR)/fshell_.c \ $(OBJDIR)/fusefs_.c \ $(OBJDIR)/glob_.c \ $(OBJDIR)/graph_.c \ $(OBJDIR)/gzip_.c \ + $(OBJDIR)/hname_.c \ $(OBJDIR)/http_.c \ $(OBJDIR)/http_socket_.c \ $(OBJDIR)/http_ssl_.c \ $(OBJDIR)/http_transport_.c \ $(OBJDIR)/import_.c \ @@ -762,10 +764,11 @@ $(OBJDIR)/fshell.o \ $(OBJDIR)/fusefs.o \ $(OBJDIR)/glob.o \ $(OBJDIR)/graph.o \ $(OBJDIR)/gzip.o \ + $(OBJDIR)/hname.o \ $(OBJDIR)/http.o \ $(OBJDIR)/http_socket.o \ $(OBJDIR)/http_ssl.o \ $(OBJDIR)/http_transport.o \ $(OBJDIR)/import.o \ @@ -1098,10 +1101,11 @@ $(OBJDIR)/fshell_.c:$(OBJDIR)/fshell.h \ $(OBJDIR)/fusefs_.c:$(OBJDIR)/fusefs.h \ $(OBJDIR)/glob_.c:$(OBJDIR)/glob.h \ $(OBJDIR)/graph_.c:$(OBJDIR)/graph.h \ $(OBJDIR)/gzip_.c:$(OBJDIR)/gzip.h \ + $(OBJDIR)/hname_.c:$(OBJDIR)/hname.h \ $(OBJDIR)/http_.c:$(OBJDIR)/http.h \ $(OBJDIR)/http_socket_.c:$(OBJDIR)/http_socket.h \ $(OBJDIR)/http_ssl_.c:$(OBJDIR)/http_ssl.h \ $(OBJDIR)/http_transport_.c:$(OBJDIR)/http_transport.h \ $(OBJDIR)/import_.c:$(OBJDIR)/import.h \ @@ -1502,10 +1506,18 @@ $(OBJDIR)/gzip.o: $(OBJDIR)/gzip_.c $(OBJDIR)/gzip.h $(SRCDIR)/config.h $(XTCC) -o $(OBJDIR)/gzip.o -c $(OBJDIR)/gzip_.c $(OBJDIR)/gzip.h: $(OBJDIR)/headers + +$(OBJDIR)/hname_.c: $(SRCDIR)/hname.c $(TRANSLATE) + $(TRANSLATE) $(SRCDIR)/hname.c >$@ + +$(OBJDIR)/hname.o: $(OBJDIR)/hname_.c $(OBJDIR)/hname.h $(SRCDIR)/config.h + $(XTCC) -o $(OBJDIR)/hname.o -c $(OBJDIR)/hname_.c + +$(OBJDIR)/hname.h: $(OBJDIR)/headers $(OBJDIR)/http_.c: $(SRCDIR)/http.c $(TRANSLATE) $(TRANSLATE) $(SRCDIR)/http.c >$@ $(OBJDIR)/http.o: $(OBJDIR)/http_.c $(OBJDIR)/http.h $(SRCDIR)/config.h Index: win/Makefile.msc ================================================================== --- win/Makefile.msc +++ win/Makefile.msc @@ -386,10 +386,11 @@ fshell_.c \ fusefs_.c \ glob_.c \ graph_.c \ gzip_.c \ + hname_.c \ http_.c \ http_socket_.c \ http_ssl_.c \ http_transport_.c \ import_.c \ @@ -561,10 +562,11 @@ $(OX)\fshell$O \ $(OX)\fusefs$O \ $(OX)\glob$O \ $(OX)\graph$O \ $(OX)\gzip$O \ + $(OX)\hname$O \ $(OX)\http$O \ $(OX)\http_socket$O \ $(OX)\http_ssl$O \ $(OX)\http_transport$O \ $(OX)\import$O \ @@ -745,10 +747,11 @@ echo $(OX)\fshell.obj >> $@ echo $(OX)\fusefs.obj >> $@ echo $(OX)\glob.obj >> $@ echo $(OX)\graph.obj >> $@ echo $(OX)\gzip.obj >> $@ + echo $(OX)\hname.obj >> $@ echo $(OX)\http.obj >> $@ echo $(OX)\http_socket.obj >> $@ echo $(OX)\http_ssl.obj >> $@ echo $(OX)\http_transport.obj >> $@ echo $(OX)\import.obj >> $@ @@ -1178,10 +1181,16 @@ $(OX)\gzip$O : gzip_.c gzip.h $(TCC) /Fo$@ -c gzip_.c gzip_.c : $(SRCDIR)\gzip.c translate$E $** > $@ + +$(OX)\hname$O : hname_.c hname.h + $(TCC) /Fo$@ -c hname_.c + +hname_.c : $(SRCDIR)\hname.c + translate$E $** > $@ $(OX)\http$O : http_.c http.h $(TCC) /Fo$@ -c http_.c http_.c : $(SRCDIR)\http.c @@ -1726,10 +1735,11 @@ fshell_.c:fshell.h \ fusefs_.c:fusefs.h \ glob_.c:glob.h \ graph_.c:graph.h \ gzip_.c:gzip.h \ + hname_.c:hname.h \ http_.c:http.h \ http_socket_.c:http_socket.h \ http_ssl_.c:http_ssl.h \ http_transport_.c:http_transport.h \ import_.c:import.h \ Index: www/changes.wiki ================================================================== --- www/changes.wiki +++ www/changes.wiki @@ -1,10 +1,13 @@ Change Log - -

      Changes for Version 1.38 (2017-??-??)

      + +

      Changes for Version 2.0 (2017-03-??)

      + * Added support for SHA3 hashes used as + [./fileformat.wiki#names|artifact names]. + * Added support for the [/help?cmd=sha3sum|sha3sum] command. * Update the built-in SQLite to version 3.17.0.

      Changes for Version 1.37 (2017-01-16)

Index: www/fileformat.wiki ================================================================== --- www/fileformat.wiki +++ www/fileformat.wiki @@ -9,14 +9,19 @@ searchable, and extensible by people not yet born. The global state of a fossil repository is an unordered set of artifacts. An artifact might be a source code file, the text of a wiki page, -part of a trouble ticket, or one of several special control artifacts -used to show the relationships between other artifacts within the -project. Each artifact is normally represented on disk as a separate -file. Artifacts can be text or binary. +part of a trouble ticket, a description of a check-in including all +the files in that check-in, the check-in comment, and so forth. +Artifacts are broadly grouped into two types: content artifacts and +structural artifacts. Content artifacts are the raw project source-code +files that are checked into the repository. Structural artifacts have +special formatting rules and are used to show the relationships between +other artifacts in the repository. It is possible for an artifact to +be both a structural artifact and a content artifact, though this is +rare. Artifacts can be text or binary. In addition to the global state, each fossil repository also contains local state. The local state consists of web-page formatting preferences, authorized users, ticket display and reporting formats, @@ -27,18 +32,45 @@ with the global state. The local state is not composed of artifacts and is not intended to be enduring. This document is concerned with global state only. Local state is only mentioned here in order to distinguish it from global state. -Each artifact in the repository is named by its SHA1 hash. -No prefixes or meta information is added to an artifact before -its hash is computed. The name of an artifact in the repository -is exactly the same SHA1 hash that is computed by sha1sum -on the file as it exists in your source tree.

      - -Some artifacts have a particular format which gives them special -meaning to fossil. Fossil recognizes: + +

      1.0 Artifact Names

+ +Each artifact in the repository is named by a hash of its content. +No prefixes, suffixes, or other information is added to an artifact before +the hash is computed. The artifact name is just the (lower-case +hexadecimal) hash of the raw artifact. + +Fossil supports multiple hash algorithms including SHA1 and various +lengths of SHA3. Because an artifact can be hashed using multiple algorithms, +a single artifact can have multiple names. Usually, Fossil knows +each artifact by just a single name called the "display name". But it is +possible for Fossil to know an artifact by multiple names from different +hashes. In that case, Fossil uses the display name for output, but continues +to accept the alternative names as command-line arguments or as parameters to +webpage URLs. + +When referring to artifacts using tty commands or webpage URLs, it is +sufficient to specify a unique prefix for the artifact name. If the input +prefix is not unique, Fossil will show an error. Within a structural +artifact, however, all references to other artifacts must be the complete +hash. + +Prior to Fossil version 2.0, all names were formed from the SHA1 hash of +the artifact. The key innovation in Fossil 2.0 was adding support for +alternative hash algorithms. + + +
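To make the naming rule concrete, here is a minimal sketch in C. It is an illustration only: it uses OpenSSL's EVP interface (1.1.1 or later for SHA3) rather than Fossil's bundled sha1.c and sha3.c, the file path comes from the command line, and it simply prints the two candidate names, the 40-digit SHA1 name and the 64-digit SHA3-256 name, as lowercase hex over the raw file bytes. Assuming Fossil's SHA3 naming uses standard FIPS-202 SHA3-256, the second line should agree with what the sha3sum command reports.

/* Sketch: compute candidate artifact names for a file's raw bytes.
** Build with: cc name_sketch.c -lcrypto
** Uses OpenSSL (EVP) for SHA1 and SHA3-256; Fossil itself ships its own
** hash implementations.  Illustration only, not Fossil source code.
*/
#include <stdio.h>
#include <stdlib.h>
#include <openssl/evp.h>

static void print_name(const char *zLabel, const EVP_MD *pMd,
                       const unsigned char *aData, size_t nData){
  unsigned char aDigest[EVP_MAX_MD_SIZE];
  unsigned int nDigest = 0;
  unsigned int i;
  EVP_Digest(aData, nData, aDigest, &nDigest, pMd, NULL);
  printf("%s: ", zLabel);
  for(i=0; i<nDigest; i++) printf("%02x", aDigest[i]);  /* lowercase hex */
  printf("\n");
}

int main(int argc, char **argv){
  FILE *in;
  unsigned char *aBuf;
  long n;
  if( argc!=2 ) return 1;
  in = fopen(argv[1], "rb");
  if( in==0 ) return 1;
  fseek(in, 0, SEEK_END);
  n = ftell(in);
  rewind(in);
  aBuf = malloc(n>0 ? (size_t)n : 1);
  if( aBuf==0 || fread(aBuf, 1, (size_t)n, in)!=(size_t)n ) return 1;
  fclose(in);
  /* No prefixes or suffixes: the name is just the hash of the raw bytes. */
  print_name("SHA1 name (40 hex digits)    ", EVP_sha1(), aBuf, (size_t)n);
  print_name("SHA3-256 name (64 hex digits)", EVP_sha3_256(), aBuf, (size_t)n);
  free(aBuf);
  return 0;
}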

      2.0 Structural Artifacts

      + +A structural artifact is an artifact that has a particular format and +that is used to define the relationships between other artifacts in the +repository. +Fossil recognizes the following kinds of structural +artifacts:
      • [#manifest | Manifests]
      • [#cluster | Clusters]
      • [#ctrl | Control Artifacts]
      • @@ -46,66 +78,55 @@
      • [#tktchng | Ticket Changes]
      • [#attachment | Attachments]
      • [#event | TechNotes]
-These seven artifact types are described in the following sections. +These seven structural artifact types are described in subsections below. -In the current implementation (as of 2009-01-25) the artifacts that +Structural artifacts are ASCII text. The artifact may be PGP clearsigned. +After removal of the PGP clearsign header and suffix (if any) a structural +artifact consists of one or more "cards" separated by a single newline +(ASCII: 0x0a) character. Each card begins with a single +character "card type". Zero or more arguments may follow +the card type. All arguments are separated from each other +and from the card-type character by a single space +character. There is no surplus white space between arguments +and no leading or trailing whitespace except for the newline +character that acts as the card separator. All cards must be in strict +lexicographical order. There may not be any duplicate cards. + +In the current implementation (as of 2017-02-27) the artifacts that make up a fossil repository are stored as delta- and zlib-compressed blobs in an SQLite database. This is an implementation detail and might change in a future release. For the purpose of this article "file format" means the format of the artifacts, not how the artifacts are stored on disk. It is the artifact format that is intended to be enduring. The specifics of how artifacts are stored on disk, though stable, are not intended to live as long as the artifact format. -All of the artifacts can be extracted from a Fossil repository using -the "fossil deconstruct" command. - -
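The card-layout rules just described (single-character card type, arguments separated by single spaces, no surplus whitespace, strict lexicographic order, no duplicates) are easy to check mechanically. The C sketch below is not Fossil's own parser: it assumes any PGP clearsign armor has already been stripped, checks only the generic layout, and ignores per-card rules (for example, some cards are followed by raw content rather than further cards).

/* Sketch: check the generic card-layout rules for a structural artifact.
** zArtifact must be NUL-terminated text with PGP armor already removed.
** Returns 1 if the layout looks well-formed, 0 otherwise.  Illustration
** only; real validation also needs the per-card semantic rules.
*/
#include <string.h>

static int cards_are_well_formed(const char *z){
  const char *zPrev = 0;     /* Start of the previous card */
  size_t nPrev = 0;          /* Length of the previous card */
  while( *z ){
    const char *zCard = z;
    const char *zNl = strchr(z, '\n');
    size_t n, i;
    if( zNl==0 ) return 0;               /* every card ends with a newline */
    n = (size_t)(zNl - z);
    if( n<1 ) return 0;                  /* no blank lines between cards */
    if( zCard[0]<'A' || zCard[0]>'Z' ) return 0;   /* one-letter card type */
    if( n>1 && zCard[1]!=' ' ) return 0; /* a single space before the args */
    for(i=1; i<n; i++){
      if( zCard[i]==' ' && (i+1==n || zCard[i+1]==' ') ){
        return 0;                        /* no trailing or doubled spaces */
      }
    }
    if( zPrev ){
      size_t nMin = nPrev<n ? nPrev : n;
      int c = memcmp(zPrev, zCard, nMin);
      if( c>0 || (c==0 && nPrev>=n) ) return 0;  /* strict lexicographic order */
    }
    zPrev = zCard;
    nPrev = n;
    z = zNl + 1;
  }
  return zPrev!=0;                       /* at least one card */
}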

      1.0 The Manifest

      +

      2.1 The Manifest

      -A manifest defines a check-in or version of the project -source tree. The manifest contains a list of artifacts for +A manifest defines a check-in. +A manifest contains a list of artifacts for each file in the project and the corresponding filenames, as -well as information such as parent check-ins, the name of the +well as information such as parent check-ins, the username of the programmer who created the check-in, the date and time when the check-in was created, and any check-in comments associated with the check-in. -Any artifact in the repository that follows the syntactic rules -of a manifest is a manifest. Note that a manifest can -be both a real manifest and also a content file, though this -is rare. - -A manifest is a text file. Newline characters -(ASCII 0x0a) separate the file into "cards". -Each card begins with a single -character "card type". Zero or more arguments may follow -the card type. All arguments are separated from each other -and from the card-type character by a single space -character. There is no surplus white space between arguments -and no leading or trailing whitespace except for the newline -character that acts as the card separator. - -All cards of the manifest occur in strict sorted lexicographical order. -No card may be duplicated. -The entire manifest may be PGP clear-signed, but otherwise it -may contain no additional text or data beyond what is described here. - Allowed cards in the manifest are as follows:
      B baseline-manifest
      C checkin-comment
      D time-and-date-stamp
      -F filename ?SHA1-hash? ?permissions? ?old-name?
      +F filename ?hash? ?permissions? ?old-name?
      N mimetype
      -P SHA1-hash+
      -Q (+|-)SHA1-hash ?SHA1-hash?
      +P artifact-hash+
      +Q (+|-)artifact-hash ?artifact-hash?
      R repository-checksum
      T (+|-|*)tag-name * ?value?
      U user-login
      Z manifest-checksum
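For orientation, a schematic baseline manifest built from these cards is shown below. The hashes, date, filenames, and user name are placeholders rather than values from any real repository; note that spaces in the check-in comment are escaped as \s and that the cards appear in lexicographic order.

C Fix\soff-by-one\sin\sthe\sparser
D 2017-02-27T15:30:00.000
F src/main.c <complete-artifact-hash-of-main.c>
F tools/build.sh <complete-artifact-hash-of-build.sh> x
P <complete-artifact-hash-of-parent-manifest>
U alice
Z <32-character-md5-of-all-preceding-lines>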
      @@ -145,11 +166,11 @@ check-in relative to the root of the project file hierarchy. No ".." or "." directories are allowed within the filename. Space characters are escaped as in C-card comment text. Backslash characters and newlines are not allowed within filenames. The directory separator character is a forward slash (ASCII 0x2F). The second argument to the -F-card is the full 40-character lower-case hexadecimal SHA1 hash of +F-card is the lower-case hexadecimal artifact hash of the content artifact. The second argument is required for baseline manifests but is optional for delta manifests. When the second argument to the F-card is omitted, it means that the file has been deleted relative to the baseline (files removed in baseline manifests versions are not added as F-cards). The optional 3rd argument @@ -167,14 +188,14 @@ is used. A manifest has zero or one P-cards. Most manifests have one P-card. The P-card has a varying number of arguments that define other manifests from which the current manifest -is derived. Each argument is a 40-character lowercase -hexadecimal SHA1 of a predecessor manifest. All arguments +is derived. Each argument is a lowercase +hexadecimal artifact hash of a predecessor manifest. All arguments to the P-card must be unique within that card. -The first argument is the SHA1 of the direct ancestor of the manifest. +The first argument is the artifact hash of the direct ancestor of the manifest. Other arguments define manifests with which the first was merged to yield the current manifest. Most manifests have a P-card with a single argument. The first manifest in the project has no ancestors and thus has no P-card or (depending on the Fossil version) an empty P-card (no arguments). @@ -229,41 +250,27 @@ check-in comment argument to the C-card. A manifest must have a single Z-card as its last line. The argument to the Z-card is a 32-character lowercase hexadecimal MD5 hash of all prior lines of the manifest up to and including the newline -character that immediately precedes the "Z". The Z-card is +character that immediately precedes the "Z", excluding any PGP +clear-signing prefix. The Z-card is a sanity check to prove that the manifest is well-formed and consistent. A sample manifest from Fossil itself can be seen [/artifact/28987096ac | here]. -
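Because the Z card covers every byte of the (clearsign-stripped) artifact up to and including the newline that immediately precedes the "Z", it can be recomputed with any MD5 implementation. The sketch below is illustrative only: it uses OpenSSL's EVP interface rather than Fossil's internal md5.c, and it assumes the PGP clear-signing prefix and suffix have already been removed from the buffer.

/* Sketch: verify the Z card of a structural artifact.
** zArtifact is the artifact text with any PGP clearsign armor removed;
** nArtifact is its length in bytes.  Returns 1 if the Z card matches.
** Build with: cc zcard_sketch.c -lcrypto
*/
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

static int z_card_ok(const char *zArtifact, size_t nArtifact){
  unsigned char aDigest[EVP_MAX_MD_SIZE];
  unsigned int nDigest = 0;
  char zHex[33];
  size_t i, iZ = 0;
  int seen = 0;
  /* Find the start of the final "Z " card. */
  for(i=0; i+1<nArtifact; i++){
    if( zArtifact[i]=='\n' && zArtifact[i+1]=='Z' ){ iZ = i+1; seen = 1; }
  }
  if( !seen || iZ+34>nArtifact ) return 0;   /* need "Z " plus 32 hex digits */
  /* MD5 of everything up to and including the newline before the Z. */
  EVP_Digest(zArtifact, iZ, aDigest, &nDigest, EVP_md5(), NULL);
  for(i=0; i<16; i++) sprintf(&zHex[i*2], "%02x", aDigest[i]);
  return memcmp(zHex, &zArtifact[iZ+2], 32)==0;
}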

      2.0 Clusters

      +

      2.2 Clusters

      A cluster is an artifact that declares the existence of other artifacts. Clusters are used during repository synchronization to help reduce network traffic. As such, clusters are an optimization and may be removed from a repository without loss or damage to the underlying project code. -Clusters follow a syntax that is very similar to manifests. -A cluster is a line-oriented text file. Newline characters -(ASCII 0x0a) separate the artifact into cards. Each card begins with a single -character "card type". Zero or more arguments may follow -the card type. All arguments are separated from each other -and from the card-type character by a single space -character. There is no surplus white space between arguments -and no leading or trailing whitespace except for the newline -character that acts as the card separator. -All cards of a cluster occur in strict sorted lexicographical order. -No card may be duplicated. -The cluster may not contain additional text or data beyond -what is described here. -Unlike manifests, clusters are never PGP signed. - Allowed cards in the cluster are as follows:
      M artifact-id
      Z checksum @@ -278,20 +285,14 @@ An example cluster from Fossil can be seen [/artifact/d03dbdd73a2a8 | here]. -

      3.0 Control Artifacts

      +

      2.3 Control Artifacts

      Control artifacts are used to assign properties to other artifacts -within the repository. The basic format of a control artifact is -the same as a manifest or cluster. A control artifact is a text -file divided into cards by newline characters. Each card has a -single-character card type followed by arguments. Spaces separate -the card type and the arguments. No surplus whitespace is allowed. -All cards must occur in strict lexicographical order. - +within the repository. Allowed cards in a control artifact are as follows:
      D time-and-date-stamp
      T (+|-|*)tag-name artifact-id ?value?
@@ -338,16 +339,15 @@ An example control artifact can be seen [/info/9d302ccda8 | here]. -
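As a further schematic illustration (the hash and tag names below are placeholders, and the U and Z cards shown belong to the full card set for control artifacts even though this excerpt elides them), a control artifact that moves a check-in onto a hypothetical "hotfix" branch might carry cards such as these, where "*" marks a propagating tag and "-" cancels a tag:

D 2017-02-27T16:45:00.000
T *branch <complete-hash-of-target-check-in> hotfix
T *sym-hotfix <complete-hash-of-target-check-in>
T -sym-trunk <complete-hash-of-target-check-in>
U alice
Z <32-character-md5-of-all-preceding-lines>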

      4.0 Wiki Pages

      +

      2.4 Wiki Pages

      -A wiki page is an artifact with a format similar to manifests, -clusters, and control artifacts. The artifact is divided into -cards by newline characters. The format of each card is as in -manifests, clusters, and control artifacts. Wiki artifacts accept +A wiki artifact defines a single version of a +single wiki page. +Wiki artifacts accept the following card types:
      D time-and-date-stamp
      L wiki-title
      @@ -375,11 +375,11 @@ An example wiki artifact can be seen [/artifact?name=7b2f5fd0e0&txt=1 | here]. -

      5.0 Ticket Changes

      +

      2.5 Ticket Changes

      A ticket-change artifact represents a change to a trouble ticket. The following cards are allowed on a ticket change artifact:
      @@ -421,11 +421,11 @@ An example ticket-change artifact can be seen [/artifact/91f1ec6af053 | here]. -

      6.0 Attachments

      +

      2.6 Attachments

      An attachment artifact associates some other artifact that is the attachment (the source artifact) with a ticket or wiki page or technical note to which the attachment is connected (the target artifact). @@ -463,11 +463,11 @@ The Z card is the usual checksum over the rest of the attachment artifact. The Z card is required. -

      7.0 Technical Notes

      +

      2.7 Technical Notes

      A technical note or "technote" artifact (formerly known as an "event" artifact) associates a timeline comment and a page of text (similar to a wiki page) with a point in time. Technotes can be used to record project milestones, release notes, blog entries, process @@ -532,11 +532,11 @@ The Z card is the required checksum over the rest of the artifact. -

      8.0 Card Summary

      +

      3.0 Card Summary

      The following table summarizes the various kinds of cards that appear on Fossil artifacts. A blank entry means that combination of card and artifact is not legal. A number or range of numbers indicates the number of times a card may (or must) appear in the corresponding artifact type. @@ -739,16 +739,16 @@ -

      9.0 Addenda

      +

      4.0 Addenda

      This section contains additional information which may be useful when implementing algorithms described above. -

      R Card Hash Calculation

      +

      4.1 R-Card Hash Calculation

      Given a manifest file named MF, the following Bash shell code demonstrates how to compute the value of the R card in that manifest. This example uses manifest [28987096ac]. Lines starting with # are shell input and other lines are output. This demonstration assumes that the