X-Git-Url: http://dolda2000.com/gitweb/?a=blobdiff_plain;f=daemon%2Fclient.c;h=50da10dea9f8ff906b6a49a527f9014922fb4c60;hb=62dbcd8559ef5d6b6b51372e1b58afd14a85140d;hp=e7fa0e72ce0a2f4aa20914a9b9f3b534b8f8ae43;hpb=d3372da97568d5e1f35fa19787c8ec8af93a0435;p=doldaconnect.git

diff --git a/daemon/client.c b/daemon/client.c
index e7fa0e7..50da10d 100644
--- a/daemon/client.c
+++ b/daemon/client.c
@@ -76,7 +76,11 @@ static struct scanstate *scanjob = NULL;
 static struct scanqueue *scanqueue = NULL;
 static struct sharepoint *shares = NULL;
 static struct hashcache *hashcache = NULL;
-static pid_t hashjob = 0;
+/* Set initially to -1, but changed to 0 the first time run() is
+ * called. This is to avoid forking a hash job before daemonizing,
+ * since that would make the daemon unable to wait() for the hash
+ * job. */
+static pid_t hashjob = -1;
 struct sharecache *shareroot = NULL;
 unsigned long long sharesize = 0;
 GCBCHAIN(sharechangecb, unsigned long long);
@@ -131,6 +135,88 @@ static void dumpsharecache(struct sharecache *node, int l)
     }
 }
 
+struct hash *newhash(wchar_t *algo, size_t len, char *buf)
+{
+    struct hash *ret;
+    
+    ret = smalloc(sizeof(*ret));
+    memset(ret, 0, sizeof(*ret));
+    ret->algo = swcsdup(algo);
+    ret->len = len;
+    ret->buf = memcpy(smalloc(len), buf, len);
+    return(ret);
+}
+
+void freehash(struct hash *hash)
+{
+    free(hash->algo);
+    free(hash->buf);
+    free(hash);
+}
+
+struct hash *duphash(struct hash *hash)
+{
+    return(newhash(hash->algo, hash->len, hash->buf));
+}
+
+struct hash *parsehash(wchar_t *text)
+{
+    wchar_t *p;
+    char *mbsbuf, *decbuf;
+    size_t buflen;
+    struct hash *ret;
+    
+    if((p = wcschr(text, L':')) == NULL)
+        return(NULL);
+    *(p++) = L'\0';
+    if((mbsbuf = icwcstombs(p, "US-ASCII")) == NULL)
+        return(NULL);
+    decbuf = base64decode(mbsbuf, &buflen);
+    free(mbsbuf);
+    if(decbuf == NULL)
+        return(NULL);
+    ret = newhash(text, buflen, decbuf);
+    free(decbuf);
+    return(ret);
+}
+
+wchar_t *unparsehash(struct hash *hash)
+{
+    static wchar_t *buf = NULL;
+    wchar_t *whbuf;
+    char *hbuf;
+    size_t bufsize, bufdata;
+    
+    if(buf != NULL)
+        free(buf);
+    buf = NULL;
+    bufsize = bufdata = 0;
+    hbuf = base64encode(hash->buf, hash->len);
+    if((whbuf = icmbstowcs(hbuf, "US-ASCII")) == NULL)
+    {
+        flog(LOG_CRIT, "bug! could not convert base64 from us-ascii: %s", strerror(errno));
+        abort();
+    }
+    free(hbuf);
+    bufcat(buf, hash->algo, wcslen(hash->algo));
+    addtobuf(buf, ':');
+    bufcat(buf, whbuf, wcslen(whbuf));
+    free(whbuf);
+    addtobuf(buf, 0);
+    return(buf);
+}
+
+int hashcmp(struct hash *h1, struct hash *h2)
+{
+    if(wcscmp(h1->algo, h2->algo))
+        return(0);
+    if(h1->len != h2->len)
+        return(0);
+    if(memcmp(h1->buf, h2->buf, h1->len))
+        return(0);
+    return(1);
+}
+
 static struct hashcache *newhashcache(void)
 {
     struct hashcache *new;
@@ -453,13 +539,10 @@ static void checkhashes(void)
     char *path;
     
     node = shareroot->child;
-    while(1)
+    for(node = shareroot->child; node != NULL; node = nextscnode(node))
     {
-        if(node->child != NULL)
-        {
-            node = node->child;
+        if(node->f.b.type != FILE_REG)
             continue;
-        }
         if(!node->f.b.hastth)
         {
             if((hc = findhashcache(node->dev, node->inode)) != NULL)
@@ -473,19 +556,14 @@ static void checkhashes(void)
                 {
                     flog(LOG_WARNING, "could not hash %s, unsharing it", path);
                     freecache(node);
+                    free(path);
+                    flog(LOG_INFO, "sharing %lli bytes", sharesize);
+                    continue;
                 }
                 free(path);
                 return;
             }
         }
-        while(node->next == NULL)
-        {
-            if((node = node->parent) == shareroot)
-                break;
-        }
-        if(node == shareroot)
-            break;
-        node = node->next;
     }
 }
 
@@ -846,7 +924,7 @@ int doscan(int quantum)
             }
             type = FILE_REG;
         } else {
-            flog(LOG_WARNING, "unhandled file type: %i", sb.st_mode);
+            flog(LOG_WARNING, "unhandled file type: 0%o", sb.st_mode);
             free(wcs);
             continue;
         }
@@ -969,6 +1047,11 @@ static int init(int hup)
 
 static int run(void)
 {
+    if(hashjob == -1)
+    {
+        hashjob = 0;
+        checkhashes();
+    }
     return(doscan(10));
 }
 
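The hash helpers added above (newhash, freehash, duphash, parsehash, unparsehash, hashcmp) form a small round-trip API: parsehash() splits an "algo:base64" string and decodes it, unparsehash() re-encodes a struct hash into a static wide-character buffer, and hashcmp() reports equality. The snippet below is only an illustrative sketch of how they fit together; it assumes it is compiled inside the daemon tree with client.h declaring struct hash and these functions, and the TTH string is a made-up example value, not real data.

/* Illustrative sketch only -- not part of the commit.  Assumes the
 * daemon's client.h declares struct hash, parsehash(), unparsehash(),
 * duphash(), hashcmp() and freehash().  The hash value is invented. */
#include <stdio.h>
#include <wchar.h>

#include "client.h"

static void hashexample(void)
{
    struct hash *h1, *h2;
    /* parsehash() overwrites the ':' with a NUL, so the text must be
     * a writable array rather than a pointer to a string literal. */
    wchar_t text[] = L"TTH:UGFyc2VoYXNoIGV4YW1wbGU=";
    
    if((h1 = parsehash(text)) == NULL)
        return;
    h2 = duphash(h1);
    /* unparsehash() returns a static buffer owned by client.c; do not
     * free it, and do not expect it to survive the next call. */
    printf("algo=%ls encoded=%ls\n", h1->algo, unparsehash(h1));
    if(hashcmp(h1, h2))  /* non-zero means the hashes are identical */
        printf("copy matches the original\n");
    freehash(h1);
    freehash(h2);
}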
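The rewritten checkhashes() loop steps through the share cache with nextscnode(), which does not appear in this diff. Judging from the inline traversal it replaces, it is presumably a depth-first successor function over the tree rooted at shareroot; the sketch below reconstructs it under that assumption (it is not taken from the commit) and assumes client.h provides struct sharecache and the shareroot global.

/* Hypothetical reconstruction of nextscnode(): descend into the first
 * child if there is one, otherwise step to the next sibling, climbing
 * out of exhausted subtrees on the way, and return NULL once the walk
 * arrives back at shareroot.  Mirrors the inline walk removed from
 * checkhashes() above. */
#include "client.h"

static struct sharecache *nextscnode_sketch(struct sharecache *node)
{
    if(node->child != NULL)
        return(node->child);
    while(node->next == NULL)
    {
        node = node->parent;
        if(node == shareroot)
            return(NULL);
    }
    return(node->next);
}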