Latest Threat Research: SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains. Details
Socket
Book a Demo · Install · Sign in
Socket

humanhash3

Package Overview
Dependencies
Maintainers
1
Versions
4
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

humanhash3 - npm Package Compare versions

Comparing version
0.0.5
to
0.0.6
+30
-32
humanhash.py

@@ -10,5 +10,6 @@ """

import uuid as uuidlib
import math
import sys
if sys.version_info.major == 3:
if sys.version_info.major == 3: # pragma: nocover
# Map returns an iterator in PY3K

@@ -23,7 +24,3 @@ py3_map = map

# Reduce moved to functools
# http://www.artima.com/weblogs/viewpost.jsp?thread=98196
from functools import reduce
DEFAULT_WORDLIST = (

@@ -68,8 +65,2 @@ 'ack', 'alabama', 'alanine', 'alaska', 'alpha', 'angel', 'apart', 'april',

# Use a simple XOR checksum-like function for compression.
# checksum = lambda _bytes: reduce(operator.xor, _bytes, 0)
def checksum(checksum_bytes):
return reduce(operator.xor, checksum_bytes, 0)
class HumanHasher(object):

@@ -109,3 +100,3 @@

>>> HumanHasher().humanize_list(digest)
['sodium', 'magnesium', 'nineteen', 'hydrogen']
['equal', 'monkey', 'lake', 'beryllium']
"""

@@ -129,7 +120,7 @@ # Gets a list of byte values between 0-255.

>>> HumanHasher().humanize(digest)
'sodium-magnesium-nineteen-hydrogen'
'equal-monkey-lake-beryllium'
>>> HumanHasher().humanize(digest, words=6)
'hydrogen-pasta-mississippi-august-may-lithium'
'sodium-magnesium-nineteen-william-alanine-nebraska'
>>> HumanHasher().humanize(digest, separator='*')
'sodium*magnesium*nineteen*hydrogen'
'equal*monkey*lake*beryllium'
"""

@@ -147,11 +138,8 @@ # Map the compressed byte values through the word list.

>>> list(HumanHasher.compress(bytes_, 4))
[205, 128, 156, 96]
[64, 145, 117, 21]
Attempting to compress a smaller number of bytes to a larger number is
an error:
If there are less than the target number bytes, return input bytes
>>> HumanHasher.compress(bytes_, 15) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Fewer input bytes than requested output
>>> list(HumanHasher.compress(bytes_, 15)) # doctest: +ELLIPSIS
[96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151]
"""

@@ -162,14 +150,24 @@

length = len(bytes_list)
if target > length:
raise ValueError("Fewer input bytes than requested output")
# If there are less than the target number bytes, return input bytes
if target >= length:
return bytes_
# Split `bytes` into `target` segments.
seg_size = length // target
segments = [bytes_list[i * seg_size:(i + 1) * seg_size]
for i in range(target)]
# Catch any left-over bytes in the last segment.
segments[-1].extend(bytes_list[target * seg_size:])
# Split `bytes` evenly into `target` segments
# Each segment hashes `seg_size` bytes, rounded down for some
seg_size = float(length) / float(target)
# Initialize `target` number of segments
segments = [0] * target
seg_num = 0
return map(checksum, segments)
# Use a simple XOR checksum-like function for compression
for i, byte in enumerate(bytes_list):
# Divide the byte index by the segment size to assign its segment
# Floor to create a valid segment index
# Min to ensure the index is within `target`
seg_num = min(int(math.floor(i / seg_size)), target-1)
# Apply XOR to the existing segment and the byte
segments[seg_num] = operator.xor(segments[seg_num], byte)
return segments
def uuid(self, **params):

@@ -202,3 +200,3 @@

if __name__ == "__main__":
if __name__ == "__main__": # pragma: nocover
import doctest

@@ -205,0 +203,0 @@ # http://stackoverflow.com/a/25691978/6461688

Metadata-Version: 1.1
Name: humanhash3
Version: 0.0.5
Version: 0.0.6
Summary: Human-readable representations of digests.

@@ -5,0 +5,0 @@ Home-page: https://github.com/blag/humanhash

@@ -11,3 +11,3 @@ #!/usr/bin/env python

name='humanhash3',
version='0.0.5',
version='0.0.6',
description='Human-readable representations of digests.',

@@ -14,0 +14,0 @@ long_description=long_description,