+17
-13
@@ -50,9 +50,11 @@ // MegaHash v1.0 | ||
| // store key/value pair, no return value | ||
| Napi::Env env = info.Env(); | ||
| Napi::Buffer<unsigned char> keyBuf = info[0].As<Napi::Buffer<unsigned char>>(); | ||
| unsigned char *key = keyBuf.Data(); | ||
| BH_KLEN_T keyLength = (BH_KLEN_T)keyBuf.Length(); | ||
| MH_KLEN_T keyLength = (MH_KLEN_T)keyBuf.Length(); | ||
| Napi::Buffer<unsigned char> valueBuf = info[1].As<Napi::Buffer<unsigned char>>(); | ||
| unsigned char *value = valueBuf.Data(); | ||
| BH_LEN_T valueLength = (BH_LEN_T)valueBuf.Length(); | ||
| MH_LEN_T valueLength = (MH_LEN_T)valueBuf.Length(); | ||
@@ -64,4 +66,4 @@ unsigned char flags = 0; | ||
| this->hash->store( key, keyLength, value, valueLength, flags ); | ||
| return info.Env().Undefined(); | ||
| Response resp = this->hash->store( key, keyLength, value, valueLength, flags ); | ||
| return Napi::Number::New(env, (double)resp.result); | ||
| } | ||
@@ -75,8 +77,10 @@ | ||
| unsigned char *key = keyBuf.Data(); | ||
| BH_KLEN_T keyLength = (BH_KLEN_T)keyBuf.Length(); | ||
| MH_KLEN_T keyLength = (MH_KLEN_T)keyBuf.Length(); | ||
| Response resp = this->hash->fetch( key, keyLength ); | ||
| if (resp.result == BH_OK) { | ||
| if (resp.result == MH_OK) { | ||
| Napi::Buffer<unsigned char> valueBuf = Napi::Buffer<unsigned char>::Copy( env, resp.content, resp.contentLength ); | ||
| if (!valueBuf) return env.Undefined(); | ||
| if (resp.flags) valueBuf.Set( "flags", (double)resp.flags ); | ||
@@ -94,6 +98,6 @@ return valueBuf; | ||
| unsigned char *key = keyBuf.Data(); | ||
| BH_KLEN_T keyLength = (BH_KLEN_T)keyBuf.Length(); | ||
| MH_KLEN_T keyLength = (MH_KLEN_T)keyBuf.Length(); | ||
| Response resp = this->hash->fetch( key, keyLength ); | ||
| return Napi::Boolean::New(env, (resp.result == BH_OK)); | ||
| return Napi::Boolean::New(env, (resp.result == MH_OK)); | ||
| } | ||
@@ -107,6 +111,6 @@ | ||
| unsigned char *key = keyBuf.Data(); | ||
| BH_KLEN_T keyLength = (BH_KLEN_T)keyBuf.Length(); | ||
| MH_KLEN_T keyLength = (MH_KLEN_T)keyBuf.Length(); | ||
| Response resp = this->hash->remove( key, keyLength ); | ||
| return Napi::Boolean::New(env, (resp.result == BH_OK)); | ||
| return Napi::Boolean::New(env, (resp.result == MH_OK)); | ||
| } | ||
@@ -148,3 +152,3 @@ | ||
| Response resp = this->hash->firstKey(); | ||
| if (resp.result == BH_OK) { | ||
| if (resp.result == MH_OK) { | ||
| return Napi::Buffer<unsigned char>::Copy( env, resp.content, resp.contentLength ); | ||
@@ -161,6 +165,6 @@ } | ||
| unsigned char *key = keyBuf.Data(); | ||
| BH_KLEN_T keyLength = (BH_KLEN_T)keyBuf.Length(); | ||
| MH_KLEN_T keyLength = (MH_KLEN_T)keyBuf.Length(); | ||
| Response resp = this->hash->nextKey( key, keyLength ); | ||
| if (resp.result == BH_OK) { | ||
| if (resp.result == MH_OK) { | ||
| return Napi::Buffer<unsigned char>::Copy( env, resp.content, resp.contentLength ); | ||
@@ -167,0 +171,0 @@ } |
+20
-20
@@ -7,13 +7,13 @@ // MegaHash v1.0 | ||
| const BH_TYPE_BUFFER = 0; | ||
| const BH_TYPE_STRING = 1; | ||
| const BH_TYPE_NUMBER = 2; | ||
| const BH_TYPE_BOOLEAN = 3; | ||
| const BH_TYPE_OBJECT = 4; | ||
| const BH_TYPE_BIGINT = 5; | ||
| const BH_TYPE_NULL = 6; | ||
| const MH_TYPE_BUFFER = 0; | ||
| const MH_TYPE_STRING = 1; | ||
| const MH_TYPE_NUMBER = 2; | ||
| const MH_TYPE_BOOLEAN = 3; | ||
| const MH_TYPE_OBJECT = 4; | ||
| const MH_TYPE_BIGINT = 5; | ||
| const MH_TYPE_NULL = 6; | ||
| MegaHash.prototype.set = function(key, value) { | ||
| // store key/value in hash, auto-convert format to buffer | ||
| var flags = BH_TYPE_BUFFER; | ||
| var flags = MH_TYPE_BUFFER; | ||
| var keyBuf = Buffer.isBuffer(key) ? key : Buffer.from(''+key, 'utf8'); | ||
@@ -26,7 +26,7 @@ if (!keyBuf.length) throw new Error("Key must have length"); | ||
| valueBuf = Buffer.alloc(0); | ||
| flags = BH_TYPE_NULL; | ||
| flags = MH_TYPE_NULL; | ||
| } | ||
| else if (typeof(valueBuf) == 'object') { | ||
| valueBuf = Buffer.from( JSON.stringify(value) ); | ||
| flags = BH_TYPE_OBJECT; | ||
| flags = MH_TYPE_OBJECT; | ||
| } | ||
@@ -36,3 +36,3 @@ else if (typeof(valueBuf) == 'number') { | ||
| valueBuf.writeDoubleBE( value ); | ||
| flags = BH_TYPE_NUMBER; | ||
| flags = MH_TYPE_NUMBER; | ||
| } | ||
@@ -42,3 +42,3 @@ else if (typeof(valueBuf) == 'bigint') { | ||
| valueBuf.writeBigInt64BE( value ); | ||
| flags = BH_TYPE_BIGINT; | ||
| flags = MH_TYPE_BIGINT; | ||
| } | ||
@@ -48,7 +48,7 @@ else if (typeof(valueBuf) == 'boolean') { | ||
| valueBuf.writeUInt8( value ? 1 : 0 ); | ||
| flags = BH_TYPE_BOOLEAN; | ||
| flags = MH_TYPE_BOOLEAN; | ||
| } | ||
| else { | ||
| valueBuf = Buffer.from(''+value, 'utf8'); | ||
| flags = BH_TYPE_STRING; | ||
| flags = MH_TYPE_STRING; | ||
| } | ||
@@ -69,23 +69,23 @@ } | ||
| switch (value.flags) { | ||
| case BH_TYPE_NULL: | ||
| case MH_TYPE_NULL: | ||
| value = null; | ||
| break; | ||
| case BH_TYPE_OBJECT: | ||
| case MH_TYPE_OBJECT: | ||
| value = JSON.parse( value.toString() ); | ||
| break; | ||
| case BH_TYPE_NUMBER: | ||
| case MH_TYPE_NUMBER: | ||
| value = value.readDoubleBE(); break; | ||
| break; | ||
| case BH_TYPE_BIGINT: | ||
| case MH_TYPE_BIGINT: | ||
| value = value.readBigInt64BE(); break; | ||
| break; | ||
| case BH_TYPE_BOOLEAN: | ||
| case MH_TYPE_BOOLEAN: | ||
| value = (value.readUInt8() == 1) ? true : false; | ||
| break; | ||
| case BH_TYPE_STRING: | ||
| case MH_TYPE_STRING: | ||
| value = value.toString(); | ||
@@ -92,0 +92,0 @@ break; |
+66
-54
@@ -12,5 +12,5 @@ // MegaHash v1.0 | ||
| Response Hash::store(unsigned char *key, BH_KLEN_T keyLength, unsigned char *content, BH_LEN_T contentLength, unsigned char flags) { | ||
| Response Hash::store(unsigned char *key, MH_KLEN_T keyLength, unsigned char *content, MH_LEN_T contentLength, unsigned char flags) { | ||
| // store key/value pair in hash | ||
| unsigned char digest[BH_DIGEST_SIZE]; | ||
| unsigned char digest[MH_DIGEST_SIZE]; | ||
| Response resp; | ||
@@ -23,9 +23,15 @@ | ||
| // this reduces malloc bashing and memory frag | ||
| BH_LEN_T payloadSize = sizeof(Bucket) + BH_KLEN_SIZE + keyLength + BH_LEN_SIZE + contentLength; | ||
| BH_LEN_T offset = sizeof(Bucket); | ||
| MH_LEN_T payloadSize = sizeof(Bucket) + MH_KLEN_SIZE + keyLength + MH_LEN_SIZE + contentLength; | ||
| MH_LEN_T offset = sizeof(Bucket); | ||
| unsigned char *payload = (unsigned char *)malloc(payloadSize); | ||
| memcpy( (void *)&payload[offset], (void *)&keyLength, BH_KLEN_SIZE ); offset += BH_KLEN_SIZE; | ||
| // check for malloc error here | ||
| if (!payload) { | ||
| resp.result = MH_ERR; | ||
| return resp; | ||
| } | ||
| memcpy( (void *)&payload[offset], (void *)&keyLength, MH_KLEN_SIZE ); offset += MH_KLEN_SIZE; | ||
| memcpy( (void *)&payload[offset], (void *)key, keyLength ); offset += keyLength; | ||
| memcpy( (void *)&payload[offset], (void *)&contentLength, BH_LEN_SIZE ); offset += BH_LEN_SIZE; | ||
| memcpy( (void *)&payload[offset], (void *)&contentLength, MH_LEN_SIZE ); offset += MH_LEN_SIZE; | ||
| memcpy( (void *)&payload[offset], (void *)content, contentLength ); offset += contentLength; | ||
@@ -40,3 +46,3 @@ | ||
| while (tag && (tag->type == BH_SIG_INDEX)) { | ||
| while (tag && (tag->type == MH_SIG_INDEX)) { | ||
| level = (Index *)tag; | ||
@@ -52,9 +58,9 @@ ch = digest[digestIndex]; | ||
| resp.result = BH_ADD; | ||
| resp.result = MH_ADD; | ||
| stats->dataSize += keyLength + contentLength; | ||
| stats->metaSize += sizeof(Bucket) + BH_KLEN_SIZE + BH_LEN_SIZE; | ||
| stats->metaSize += sizeof(Bucket) + MH_KLEN_SIZE + MH_LEN_SIZE; | ||
| stats->numKeys++; | ||
| tag = NULL; // break | ||
| } | ||
| else if (tag->type == BH_SIG_BUCKET) { | ||
| else if (tag->type == MH_SIG_BUCKET) { | ||
| // found bucket list, append | ||
@@ -73,3 +79,3 @@ bucket = (Bucket *)tag; | ||
| resp.result = BH_REPLACE; | ||
| resp.result = MH_REPLACE; | ||
| stats->dataSize -= (bucketGetKeyLength(bucket) + bucketGetContentLength(bucket)); | ||
@@ -87,6 +93,6 @@ stats->dataSize += keyLength + contentLength; | ||
| bucket->next = newBucket; | ||
| resp.result = BH_ADD; | ||
| resp.result = MH_ADD; | ||
| stats->dataSize += keyLength + contentLength; | ||
| stats->metaSize += sizeof(Bucket) + BH_KLEN_SIZE + BH_LEN_SIZE; | ||
| stats->metaSize += sizeof(Bucket) + MH_KLEN_SIZE + MH_LEN_SIZE; | ||
| stats->numKeys++; | ||
@@ -96,7 +102,13 @@ bucket = NULL; // break | ||
| // possibly reindex here | ||
| if ((bucketIndex >= maxBuckets + (ch % reindexScatter)) && (digestIndex < BH_DIGEST_SIZE - 1)) { | ||
| if ((bucketIndex >= maxBuckets + (ch % reindexScatter)) && (digestIndex < MH_DIGEST_SIZE - 1)) { | ||
| // deeper we go | ||
| digestIndex++; | ||
| newLevel = new Index(); | ||
| newLevel = new Index(); | ||
| // check for malloc error here | ||
| if (!newLevel) { | ||
| resp.result = MH_ERR; | ||
| return resp; | ||
| } | ||
| stats->indexSize += sizeof(Index); | ||
@@ -133,4 +145,4 @@ | ||
| // reindex existing bucket into new subindex level | ||
| unsigned char digest[BH_DIGEST_SIZE]; | ||
| BH_KLEN_T keyLength = bucketGetKeyLength(bucket); | ||
| unsigned char digest[MH_DIGEST_SIZE]; | ||
| MH_KLEN_T keyLength = bucketGetKeyLength(bucket); | ||
| unsigned char *key = bucketGetKey(bucket); | ||
@@ -157,5 +169,5 @@ digestKey(key, keyLength, digest); | ||
| Response Hash::fetch(unsigned char *key, BH_KLEN_T keyLength) { | ||
| Response Hash::fetch(unsigned char *key, MH_KLEN_T keyLength) { | ||
| // fetch value given key | ||
| unsigned char digest[BH_DIGEST_SIZE]; | ||
| unsigned char digest[MH_DIGEST_SIZE]; | ||
| Response resp; | ||
@@ -176,3 +188,3 @@ | ||
| while (tag && (tag->type == BH_SIG_INDEX)) { | ||
| while (tag && (tag->type == MH_SIG_INDEX)) { | ||
| level = (Index *)tag; | ||
@@ -183,6 +195,6 @@ ch = digest[digestIndex]; | ||
| // not found | ||
| resp.result = BH_ERR; | ||
| resp.result = MH_ERR; | ||
| tag = NULL; // break | ||
| } | ||
| else if (tag->type == BH_SIG_BUCKET) { | ||
| else if (tag->type == MH_SIG_BUCKET) { | ||
| // found bucket list, append | ||
@@ -195,7 +207,7 @@ bucket = (Bucket *)tag; | ||
| bucketData = ((unsigned char *)bucket) + sizeof(Bucket); | ||
| tempCL = bucketData + BH_KLEN_SIZE + keyLength; | ||
| tempCL = bucketData + MH_KLEN_SIZE + keyLength; | ||
| resp.result = BH_OK; | ||
| resp.contentLength = ((BH_LEN_T *)tempCL)[0]; | ||
| resp.content = bucketData + BH_KLEN_SIZE + keyLength + BH_LEN_SIZE; | ||
| resp.result = MH_OK; | ||
| resp.contentLength = ((MH_LEN_T *)tempCL)[0]; | ||
| resp.content = bucketData + MH_KLEN_SIZE + keyLength + MH_LEN_SIZE; | ||
@@ -207,3 +219,3 @@ resp.flags = bucket->flags; | ||
| // not found | ||
| resp.result = BH_ERR; | ||
| resp.result = MH_ERR; | ||
| bucket = NULL; // break | ||
@@ -226,5 +238,5 @@ } | ||
| Response Hash::remove(unsigned char *key, BH_KLEN_T keyLength) { | ||
| Response Hash::remove(unsigned char *key, MH_KLEN_T keyLength) { | ||
| // remove bucket given key | ||
| unsigned char digest[BH_DIGEST_SIZE]; | ||
| unsigned char digest[MH_DIGEST_SIZE]; | ||
| Response resp; | ||
@@ -242,3 +254,3 @@ | ||
| while (tag && (tag->type == BH_SIG_INDEX)) { | ||
| while (tag && (tag->type == MH_SIG_INDEX)) { | ||
| level = (Index *)tag; | ||
@@ -249,6 +261,6 @@ ch = digest[digestIndex]; | ||
| // not found | ||
| resp.result = BH_ERR; | ||
| resp.result = MH_ERR; | ||
| tag = NULL; // break | ||
| } | ||
| else if (tag->type == BH_SIG_BUCKET) { | ||
| else if (tag->type == MH_SIG_BUCKET) { | ||
| // found bucket list, traverse | ||
@@ -262,3 +274,3 @@ bucket = (Bucket *)tag; | ||
| stats->dataSize -= (bucketGetKeyLength(bucket) + bucketGetContentLength(bucket)); | ||
| stats->metaSize -= (sizeof(Bucket) + BH_KLEN_SIZE + BH_LEN_SIZE); | ||
| stats->metaSize -= (sizeof(Bucket) + MH_KLEN_SIZE + MH_LEN_SIZE); | ||
| stats->numKeys--; | ||
@@ -269,3 +281,3 @@ | ||
| resp.result = BH_OK; | ||
| resp.result = MH_OK; | ||
| free((void *)bucket); | ||
@@ -276,3 +288,3 @@ bucket = NULL; // break | ||
| // not found | ||
| resp.result = BH_ERR; | ||
| resp.result = MH_ERR; | ||
| bucket = NULL; // break | ||
@@ -298,3 +310,3 @@ } | ||
| // clear ALL keys/values | ||
| for (int idx = 0; idx < BH_INDEX_SIZE; idx++) { | ||
| for (int idx = 0; idx < MH_INDEX_SIZE; idx++) { | ||
| if (index->data[idx]) { | ||
@@ -317,3 +329,3 @@ clearTag( index->data[idx] ); | ||
| Tag *tag = index->data[slice1]; | ||
| if (tag->type == BH_SIG_INDEX) { | ||
| if (tag->type == MH_SIG_INDEX) { | ||
| // nested index, use idx2 | ||
@@ -328,4 +340,4 @@ Index *level = (Index *)tag; | ||
| int empty = 1; | ||
| for (int idx = 0; idx < BH_INDEX_SIZE; idx++) { | ||
| if (level->data[idx]) { empty = 0; idx = BH_INDEX_SIZE; } | ||
| for (int idx = 0; idx < MH_INDEX_SIZE; idx++) { | ||
| if (level->data[idx]) { empty = 0; idx = MH_INDEX_SIZE; } | ||
| } | ||
@@ -337,3 +349,3 @@ if (empty) { | ||
| } | ||
| else if (tag->type == BH_SIG_BUCKET) { | ||
| else if (tag->type == MH_SIG_BUCKET) { | ||
| clearTag( tag ); | ||
@@ -348,7 +360,7 @@ index->data[slice1] = NULL; | ||
| // traverse lists, recurse for nested indexes | ||
| if (tag->type == BH_SIG_INDEX) { | ||
| if (tag->type == MH_SIG_INDEX) { | ||
| // traverse index | ||
| Index *level = (Index *)tag; | ||
| for (int idx = 0; idx < BH_INDEX_SIZE; idx++) { | ||
| for (int idx = 0; idx < MH_INDEX_SIZE; idx++) { | ||
| if (level->data[idx]) { | ||
@@ -364,3 +376,3 @@ clearTag( level->data[idx] ); | ||
| } | ||
| else if (tag->type == BH_SIG_BUCKET) { | ||
| else if (tag->type == MH_SIG_BUCKET) { | ||
| // delete all buckets in list | ||
@@ -375,3 +387,3 @@ Bucket *bucket = (Bucket *)tag; | ||
| stats->dataSize -= (bucketGetKeyLength(lastBucket) + bucketGetContentLength(lastBucket)); | ||
| stats->metaSize -= (sizeof(Bucket) + BH_KLEN_SIZE + BH_LEN_SIZE); | ||
| stats->metaSize -= (sizeof(Bucket) + MH_KLEN_SIZE + MH_LEN_SIZE); | ||
| stats->numKeys--; | ||
@@ -387,6 +399,6 @@ | ||
| unsigned char returnNext = 1; | ||
| unsigned char digest[BH_DIGEST_SIZE]; | ||
| unsigned char digest[MH_DIGEST_SIZE]; | ||
| Response resp; | ||
| for (int idx = 0; idx < BH_DIGEST_SIZE; idx++) { | ||
| for (int idx = 0; idx < MH_DIGEST_SIZE; idx++) { | ||
| digest[idx] = 0; | ||
@@ -399,6 +411,6 @@ } | ||
| Response Hash::nextKey(unsigned char *key, BH_KLEN_T keyLength) { | ||
| Response Hash::nextKey(unsigned char *key, MH_KLEN_T keyLength) { | ||
| // return next key given previous key (in undefined order) | ||
| unsigned char returnNext = 0; | ||
| unsigned char digest[BH_DIGEST_SIZE]; | ||
| unsigned char digest[MH_DIGEST_SIZE]; | ||
| Response resp; | ||
@@ -413,17 +425,17 @@ | ||
| void Hash::traverseTag(Response *resp, Tag *tag, unsigned char *key, BH_KLEN_T keyLength, unsigned char *digest, unsigned char digestIndex, unsigned char *returnNext) { | ||
| void Hash::traverseTag(Response *resp, Tag *tag, unsigned char *key, MH_KLEN_T keyLength, unsigned char *digest, unsigned char digestIndex, unsigned char *returnNext) { | ||
| // internal method | ||
| // traverse tag tree looking for key (or return next key found) | ||
| if (tag->type == BH_SIG_INDEX) { | ||
| if (tag->type == MH_SIG_INDEX) { | ||
| // traverse index | ||
| Index *level = (Index *)tag; | ||
| for (int idx = digest[digestIndex]; idx < BH_INDEX_SIZE; idx++) { | ||
| for (int idx = digest[digestIndex]; idx < MH_INDEX_SIZE; idx++) { | ||
| if (level->data[idx]) { | ||
| traverseTag( resp, level->data[idx], key, keyLength, digest, digestIndex + 1, returnNext ); | ||
| if (resp->result == BH_OK) idx = BH_INDEX_SIZE; | ||
| if (resp->result == MH_OK) idx = MH_INDEX_SIZE; | ||
| } | ||
| } | ||
| } | ||
| else if (tag->type == BH_SIG_BUCKET) { | ||
| else if (tag->type == MH_SIG_BUCKET) { | ||
| // traverse bucket list | ||
@@ -435,3 +447,3 @@ Bucket *bucket = (Bucket *)tag; | ||
| // return whatever key we landed on (repurpose the response content for this) | ||
| resp->result = BH_OK; | ||
| resp->result = MH_OK; | ||
| resp->content = bucketGetKey(bucket); | ||
@@ -438,0 +450,0 @@ resp->contentLength = bucketGetKeyLength(bucket); |
+34
-34
@@ -13,15 +13,15 @@ // MegaHash v1.0 | ||
| /** Type used for key lengths. */ | ||
| #define BH_KLEN_T uint16_t | ||
| #define MH_KLEN_T uint16_t | ||
| /** Type used for value lengths. */ | ||
| #define BH_LEN_T uint32_t | ||
| #define MH_LEN_T uint32_t | ||
| /** Length of BH_KLEN_T and BH_LEN_T. */ | ||
| #define BH_KLEN_SIZE sizeof(BH_KLEN_T) | ||
| #define BH_LEN_SIZE sizeof(BH_LEN_T) | ||
| /** Length of MH_KLEN_T and MH_LEN_T. */ | ||
| #define MH_KLEN_SIZE sizeof(MH_KLEN_T) | ||
| #define MH_LEN_SIZE sizeof(MH_LEN_T) | ||
| /** Size of one hashed key, in bytes. */ | ||
| #define BH_DIGEST_SIZE 8 | ||
| #define MH_DIGEST_SIZE 8 | ||
| /** Size of one index level. */ | ||
| #define BH_INDEX_SIZE 16 | ||
| #define MH_INDEX_SIZE 16 | ||
@@ -32,9 +32,9 @@ /** \name Result codes after pair is stored or fetched: | ||
| /** Error occurred during operation. */ | ||
| #define BH_ERR 0 | ||
| #define MH_ERR 0 | ||
| /** Result was okay (used only in fetch()). */ | ||
| #define BH_OK 1 | ||
| #define MH_OK 1 | ||
| /** Result was add (key was unique). */ | ||
| #define BH_ADD 1 | ||
| #define MH_ADD 1 | ||
| /** Result was replace (key existed and value was overwritten). */ | ||
| #define BH_REPLACE 2 | ||
| #define MH_REPLACE 2 | ||
| //@} | ||
@@ -45,5 +45,5 @@ | ||
| /** Signature used for identifying index tags. */ | ||
| #define BH_SIG_INDEX 'I' | ||
| #define MH_SIG_INDEX 'I' | ||
| /** Signature used for identifying bucket tags. */ | ||
| #define BH_SIG_BUCKET 'B' | ||
| #define MH_SIG_BUCKET 'B' | ||
| //@} | ||
@@ -73,3 +73,3 @@ | ||
| unsigned char *content; /**< Pointer to content. Can be binary or string. */ | ||
| BH_LEN_T contentLength; /**< Length of content. */ | ||
| MH_LEN_T contentLength; /**< Length of content. */ | ||
@@ -97,3 +97,3 @@ Response() { | ||
| // each slot may point to another index, or a bucket linked list | ||
| Tag *data[BH_INDEX_SIZE]; | ||
| Tag *data[MH_INDEX_SIZE]; | ||
@@ -105,4 +105,4 @@ Index() { | ||
| void init() { | ||
| type = BH_SIG_INDEX; | ||
| for (int idx = 0; idx < BH_INDEX_SIZE; idx++) data[idx] = NULL; | ||
| type = MH_SIG_INDEX; | ||
| for (int idx = 0; idx < MH_INDEX_SIZE; idx++) data[idx] = NULL; | ||
| } | ||
@@ -124,3 +124,3 @@ }; | ||
| void init() { | ||
| type = BH_SIG_BUCKET; | ||
| type = MH_SIG_BUCKET; | ||
| flags = 0; | ||
@@ -177,7 +177,7 @@ next = NULL; | ||
| // public methods: | ||
| Response store(unsigned char *key, BH_KLEN_T keyLength, unsigned char *content, BH_LEN_T contentLength, unsigned char flags = 0); | ||
| Response fetch(unsigned char *key, BH_KLEN_T keyLength); | ||
| Response remove(unsigned char *key, BH_KLEN_T keyLength); | ||
| Response store(unsigned char *key, MH_KLEN_T keyLength, unsigned char *content, MH_LEN_T contentLength, unsigned char flags = 0); | ||
| Response fetch(unsigned char *key, MH_KLEN_T keyLength); | ||
| Response remove(unsigned char *key, MH_KLEN_T keyLength); | ||
| Response firstKey(); | ||
| Response nextKey(unsigned char *key, BH_KLEN_T keyLength); | ||
| Response nextKey(unsigned char *key, MH_KLEN_T keyLength); | ||
@@ -190,16 +190,16 @@ void clear(); | ||
| void reindexBucket(Bucket *bucket, Index *index, unsigned char digestIndex); | ||
| void traverseTag(Response *resp, Tag *tag, unsigned char *key, BH_KLEN_T keyLength, unsigned char *digest, unsigned char digestIndex, unsigned char *returnNext); | ||
| void traverseTag(Response *resp, Tag *tag, unsigned char *key, MH_KLEN_T keyLength, unsigned char *digest, unsigned char digestIndex, unsigned char *returnNext); | ||
| int bucketKeyEquals(Bucket *bucket, unsigned char *key, BH_KLEN_T keyLength) { | ||
| int bucketKeyEquals(Bucket *bucket, unsigned char *key, MH_KLEN_T keyLength) { | ||
| // compare key to bucket key | ||
| unsigned char *bucketData = ((unsigned char *)bucket) + sizeof(Bucket); | ||
| if (keyLength != ((BH_KLEN_T *)bucketData)[0]) return 0; | ||
| unsigned char *bucketKey = bucketData + BH_KLEN_SIZE; | ||
| if (keyLength != ((MH_KLEN_T *)bucketData)[0]) return 0; | ||
| unsigned char *bucketKey = bucketData + MH_KLEN_SIZE; | ||
| return (int)!memcmp( (void *)key, (void *)bucketKey, (size_t)keyLength ); | ||
| } | ||
| BH_KLEN_T bucketGetKeyLength(Bucket *bucket) { | ||
| MH_KLEN_T bucketGetKeyLength(Bucket *bucket) { | ||
| // get bucket key length | ||
| unsigned char *bucketData = ((unsigned char *)bucket) + sizeof(Bucket); | ||
| BH_KLEN_T *tempKL = (BH_KLEN_T *)bucketData; | ||
| MH_KLEN_T *tempKL = (MH_KLEN_T *)bucketData; | ||
| return tempKL[0]; | ||
@@ -211,10 +211,10 @@ } | ||
| unsigned char *bucketData = ((unsigned char *)bucket) + sizeof(Bucket); | ||
| return bucketData + BH_KLEN_SIZE; | ||
| return bucketData + MH_KLEN_SIZE; | ||
| } | ||
| BH_LEN_T bucketGetContentLength(Bucket *bucket) { | ||
| MH_LEN_T bucketGetContentLength(Bucket *bucket) { | ||
| // get bucket content (value) length | ||
| unsigned char *bucketData = ((unsigned char *)bucket) + sizeof(Bucket); | ||
| unsigned char *tempCL = bucketData + BH_KLEN_SIZE + ((BH_KLEN_T *)bucketData)[0]; | ||
| return ((BH_LEN_T *)tempCL)[0]; | ||
| unsigned char *tempCL = bucketData + MH_KLEN_SIZE + ((MH_KLEN_T *)bucketData)[0]; | ||
| return ((MH_LEN_T *)tempCL)[0]; | ||
| } | ||
@@ -225,6 +225,6 @@ | ||
| unsigned char *bucketData = ((unsigned char *)bucket) + sizeof(Bucket); | ||
| return bucketData + BH_KLEN_SIZE + ((BH_KLEN_T *)bucketData)[0] + BH_LEN_SIZE; | ||
| return bucketData + MH_KLEN_SIZE + ((MH_KLEN_T *)bucketData)[0] + MH_LEN_SIZE; | ||
| } | ||
| void digestKey(unsigned char *key, BH_KLEN_T keyLength, unsigned char *digest) { | ||
| void digestKey(unsigned char *key, MH_KLEN_T keyLength, unsigned char *digest) { | ||
| // Create 32-bit digest of custom key using DJB2 algorithm. | ||
@@ -231,0 +231,0 @@ // Return as 8 separate bytes (4 bits each) in unsigned char array |
+1
-1
| { | ||
| "name": "megahash", | ||
| "version": "1.0.2", | ||
| "version": "1.0.3", | ||
| "description": "A super-fast C++ hash table with Node.js wrapper.", | ||
@@ -5,0 +5,0 @@ "author": "Joseph Huckaby <jhuckaby@gmail.com>", |
+23
-1
@@ -21,2 +21,3 @@ <details><summary>Table of Contents</summary> | ||
| * [Iterating over Keys](#iterating-over-keys) | ||
| * [Error Handling](#error-handling) | ||
| * [Hash Stats](#hash-stats) | ||
@@ -252,2 +253,13 @@ - [API](#api) | ||
| ## Error Handling | ||
| If a hash operation fails (e.g. out of memory), then [set()](#set) will return `0`. You can check for this and throw your own error. Example: | ||
| ```js | ||
| var result = hash.set( "hello", "there" ); | ||
| if (!result) { | ||
| throw new Error("Failed to write to MegaHash: Out of memory"); | ||
| } | ||
| ``` | ||
| ## Hash Stats | ||
@@ -291,3 +303,3 @@ | ||
| ``` | ||
| VOID set( KEY, VALUE ) | ||
| NUMBER set( KEY, VALUE ) | ||
| ``` | ||
@@ -301,2 +313,10 @@ | ||
| The `set()` method actually returns a number, which will be `0`, `1` or `2`. They each have a different meaning: | ||
| | Result | Description | | ||
| |--------|-------------| | ||
| | `0` | An error occurred (out of memory). | | ||
| | `1` | A key was added to the hash (i.e. unique key). | | ||
| | `2` | An existing key was replaced in the hash. | | ||
| ## get | ||
@@ -314,2 +334,4 @@ | ||
| If the key is not found, `get()` will return `undefined`. | ||
| ## has | ||
@@ -316,0 +338,0 @@ |
+8
-0
@@ -213,2 +213,10 @@ // Unit tests for MegaHash | ||
| function testSetReturnValue(test) { | ||
| // make sure set() returns the expected return values | ||
| var hash = new MegaHash(); | ||
| test.ok( hash.set("hello", "there") == 1, "Unique key returns 1 on set" ); | ||
| test.ok( hash.set("hello", "there") == 2, "Replaced key returns 2 on set" ); | ||
| test.done(); | ||
| }, | ||
| function testRemove(test) { | ||
@@ -215,0 +223,0 @@ var hash = new MegaHash(); |
Network access
Supply chain riskThis module accesses the network.
Found 1 instance in 1 package
Filesystem access
Supply chain riskAccesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
Network access
Supply chain riskThis module accesses the network.
Found 1 instance in 1 package
Filesystem access
Supply chain riskAccesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
66538
2.08%586
1.21%469
4.92%