Latest Threat Research:SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains.Details
Socket
Book a DemoInstallSign in
Socket

targetran

Package Overview
Dependencies
Maintainers
1
Versions
65
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

targetran - npm Package Compare versions

Comparing version
0.12.0
to
0.12.1
tests/__init__.py
+588
"""
Unit test.
"""
import unittest
import numpy as np
import tensorflow as tf # type: ignore
from targetran.np import (
flip_left_right,
flip_up_down,
rotate,
shear,
translate,
crop,
)
from targetran.tf import (
to_tf,
seqs_to_tf_dataset,
tf_flip_left_right,
tf_flip_up_down,
tf_rotate,
tf_shear,
tf_translate,
tf_crop,
)
from targetran.utils import Interpolation
# Shared fixtures: three 4x3 single-channel images with matching target data.
# Each image holds consecutive integers, so the expected result of every
# geometric transform in the tests below can be verified by eye.
ORIGINAL_IMAGE_SEQ = [
    np.arange(1, 13).reshape(4, 3, 1),
    np.arange(11, 23).reshape(4, 3, 1),
    np.arange(21, 33).reshape(4, 3, 1),
]
# Bounding boxes, apparently in (top-left-x, top-left-y, width, height)
# order as implied by the flip expectations in the tests; the last image
# intentionally has no boxes to exercise the empty case.
ORIGINAL_BBOXES_SEQ = [
    np.array([
        [1, 0, 2, 2],
        [0, 1, 3, 2],
    ]),
    np.array([
        [0, 0, 2, 3],
    ]),
    np.array([]),
]
# One label per bounding box; empty where there are no boxes.
ORIGINAL_LABELS_SEQ = [
    np.array([0, 1]),
    np.array([2]),
    np.array([]),
]
# Tensor counterparts of the module-level fixtures, shared by the TF tests.
(
    TF_ORIGINAL_IMAGE_SEQ, TF_ORIGINAL_BBOXES_SEQ, TF_ORIGINAL_LABELS_SEQ
) = to_tf(
    ORIGINAL_IMAGE_SEQ, ORIGINAL_BBOXES_SEQ, ORIGINAL_LABELS_SEQ
)
class TestTransform(unittest.TestCase):
    """
    Check every geometric transform against hand-computed expected images,
    bounding boxes, and labels, for both the NumPy API (targetran.np) and
    the TensorFlow API (targetran.tf).
    """

    def test_flip_left_right(self) -> None:
        """Horizontal flip: pixel columns reverse; bbox x-origin mirrors."""
        expected_image_seq = [
            np.array([
                [[3], [2], [1]],
                [[6], [5], [4]],
                [[9], [8], [7]],
                [[12], [11], [10]]
            ], dtype=np.float32),
            np.array([
                [[13], [12], [11]],
                [[16], [15], [14]],
                [[19], [18], [17]],
                [[22], [21], [20]]
            ], dtype=np.float32),
            np.array([
                [[23], [22], [21]],
                [[26], [25], [24]],
                [[29], [28], [27]],
                [[32], [31], [30]]
            ], dtype=np.float32),
        ]
        expected_bboxes_seq = [
            np.array([
                [0, 0, 2, 2],
                [0, 1, 3, 2],
            ], dtype=np.float32),
            np.array([
                [1, 0, 2, 3],
            ], dtype=np.float32),
            # Empty bboxes are expected to keep shape (0, 4).
            np.array([], dtype=np.float32).reshape(-1, 4),
        ]
        # Flipping never changes the label assignment.
        expected_labels_seq = ORIGINAL_LABELS_SEQ
        # NumPy.
        for i in range(len(ORIGINAL_IMAGE_SEQ)):
            image, bboxes, labels = flip_left_right(
                ORIGINAL_IMAGE_SEQ[i],
                ORIGINAL_BBOXES_SEQ[i],
                ORIGINAL_LABELS_SEQ[i]
            )
            self.assertTrue(
                np.array_equal(expected_image_seq[i], image)
            )
            self.assertTrue(
                np.array_equal(expected_bboxes_seq[i], bboxes)
            )
            self.assertTrue(
                np.array_equal(expected_labels_seq[i], labels)
            )
        # TF.
        (
            tf_expected_image_seq,
            tf_expected_bboxes_seq,
            tf_expected_labels_seq
        ) = to_tf(
            expected_image_seq, expected_bboxes_seq, expected_labels_seq
        )
        for i in range(len(TF_ORIGINAL_LABELS_SEQ)):
            tf_image, tf_bboxes, tf_labels = tf_flip_left_right(
                TF_ORIGINAL_IMAGE_SEQ[i],
                TF_ORIGINAL_BBOXES_SEQ[i],
                TF_ORIGINAL_LABELS_SEQ[i]
            )
            self.assertTrue(
                np.array_equal(tf_expected_image_seq[i].numpy(),
                               tf_image.numpy())
            )
            self.assertTrue(
                np.array_equal(tf_expected_bboxes_seq[i].numpy(),
                               tf_bboxes.numpy())
            )
            self.assertTrue(
                np.array_equal(tf_expected_labels_seq[i].numpy(),
                               tf_labels.numpy())
            )

    def test_flip_up_down(self) -> None:
        """Vertical flip: pixel rows reverse; bbox y-origin mirrors."""
        expected_image_seq = [
            np.array([
                [[10], [11], [12]],
                [[7], [8], [9]],
                [[4], [5], [6]],
                [[1], [2], [3]]
            ], dtype=np.float32),
            np.array([
                [[20], [21], [22]],
                [[17], [18], [19]],
                [[14], [15], [16]],
                [[11], [12], [13]]
            ], dtype=np.float32),
            np.array([
                [[30], [31], [32]],
                [[27], [28], [29]],
                [[24], [25], [26]],
                [[21], [22], [23]]
            ], dtype=np.float32),
        ]
        expected_bboxes_seq = [
            np.array([
                [1, 2, 2, 2],
                [0, 1, 3, 2],
            ], dtype=np.float32),
            np.array([
                [0, 1, 2, 3],
            ], dtype=np.float32),
            np.array([], dtype=np.float32).reshape(-1, 4),
        ]
        # Flipping never changes the label assignment.
        expected_labels_seq = ORIGINAL_LABELS_SEQ
        # NumPy.
        for i in range(len(ORIGINAL_IMAGE_SEQ)):
            image, bboxes, labels = flip_up_down(
                ORIGINAL_IMAGE_SEQ[i],
                ORIGINAL_BBOXES_SEQ[i],
                ORIGINAL_LABELS_SEQ[i]
            )
            self.assertTrue(
                np.array_equal(expected_image_seq[i], image)
            )
            self.assertTrue(
                np.array_equal(expected_bboxes_seq[i], bboxes)
            )
            self.assertTrue(
                np.array_equal(expected_labels_seq[i], labels)
            )
        # TF.
        (
            tf_expected_image_seq,
            tf_expected_bboxes_seq,
            tf_expected_labels_seq
        ) = to_tf(
            expected_image_seq, expected_bboxes_seq, expected_labels_seq
        )
        for i in range(len(TF_ORIGINAL_LABELS_SEQ)):
            tf_image, tf_bboxes, tf_labels = tf_flip_up_down(
                TF_ORIGINAL_IMAGE_SEQ[i],
                TF_ORIGINAL_BBOXES_SEQ[i],
                TF_ORIGINAL_LABELS_SEQ[i]
            )
            self.assertTrue(
                np.array_equal(tf_expected_image_seq[i].numpy(),
                               tf_image.numpy())
            )
            self.assertTrue(
                np.array_equal(tf_expected_bboxes_seq[i].numpy(),
                               tf_bboxes.numpy())
            )
            self.assertTrue(
                np.array_equal(tf_expected_labels_seq[i].numpy(),
                               tf_labels.numpy())
            )

    def test_rotate(self) -> None:
        """Rotate by 90 and 180 degrees with nearest-neighbour sampling.

        Uses square/even-sized local fixtures so the rotated grids map
        exactly onto pixel centres.
        """
        original_image_seq = [
            np.array([
                [[1], [2], [3]],
                [[4], [5], [6]],
                [[7], [8], [9]]
            ], dtype=np.float32),
            np.array([
                [[10], [11], [12], [13]],
                [[14], [15], [16], [17]],
                [[18], [19], [20], [21]],
                [[22], [23], [24], [25]]
            ], dtype=np.float32),
        ]
        original_bboxes_seq = [
            np.array([
                [1, 0, 2, 2],
                [0, 1, 3, 2],
            ], dtype=np.float32),
            np.array([], dtype=np.float32).reshape(-1, 4),
        ]
        original_labels_seq = [
            np.array([1, 2], dtype=np.float32),
            np.array([], dtype=np.float32),
        ]
        # One angle per sample: 90 deg for the 3x3, 180 deg for the 4x4.
        angles_deg = [90.0, 180.0]
        expected_image_seq = [
            np.array([
                [[3], [6], [9]],
                [[2], [5], [8]],
                [[1], [4], [7]]
            ], dtype=np.float32),
            np.array([
                [[25], [24], [23], [22]],
                [[21], [20], [19], [18]],
                [[17], [16], [15], [14]],
                [[13], [12], [11], [10]]
            ], dtype=np.float32),
        ]
        expected_bboxes_seq = [
            np.array([
                [0, 0, 2, 2],
                [1, 0, 2, 3],
            ], dtype=np.float32),
            np.array([], dtype=np.float32).reshape(-1, 4),
        ]
        # Rotation never changes the label assignment.
        expected_labels_seq = original_labels_seq
        # NumPy.
        for i in range(len(original_image_seq)):
            image, bboxes, labels = rotate(
                original_image_seq[i],
                original_bboxes_seq[i],
                original_labels_seq[i],
                angles_deg[i],
                Interpolation.NEAREST
            )
            self.assertTrue(
                np.array_equal(expected_image_seq[i], image)
            )
            self.assertTrue(
                np.array_equal(expected_bboxes_seq[i], bboxes)
            )
            self.assertTrue(
                np.array_equal(expected_labels_seq[i], labels)
            )
        # TF.
        (
            tf_original_image_seq,
            tf_original_bboxes_seq,
            tf_original_labels_seq
        ) = to_tf(
            original_image_seq, original_bboxes_seq, original_labels_seq
        )
        (
            tf_expected_image_seq,
            tf_expected_bboxes_seq,
            tf_expected_labels_seq
        ) = to_tf(
            expected_image_seq, expected_bboxes_seq, expected_labels_seq
        )
        for i in range(len(tf_original_image_seq)):
            tf_image, tf_bboxes, tf_labels = tf_rotate(
                tf_original_image_seq[i],
                tf_original_bboxes_seq[i],
                tf_original_labels_seq[i],
                angles_deg[i],
                Interpolation.NEAREST
            )
            # allclose (not array_equal) to tolerate float rounding in TF.
            self.assertTrue(
                np.allclose(tf_expected_image_seq[i].numpy(),
                            tf_image.numpy())
            )
            self.assertTrue(
                np.allclose(tf_expected_bboxes_seq[i].numpy(),
                            tf_bboxes.numpy())
            )
            self.assertTrue(
                np.allclose(tf_expected_labels_seq[i].numpy(),
                            tf_labels.numpy())
            )

    def test_shear(self) -> None:
        """Shear by 45 degrees; only bboxes/labels are checked, not pixels."""
        dummy_image = np.random.rand(32, 32, 3)
        original_bboxes = np.array([
            [15, 15, 2, 2],
            [15, 0, 2, 2],
        ], dtype=np.float32)
        original_labels = np.array([[0], [1]], dtype=np.float32)
        angle_deg = 45.0
        expected_bboxes = np.array([
            [14, 15, 3, 2],
            [0, 0, 3, 2],
        ], dtype=np.float32)
        expected_labels = original_labels
        # NumPy.  The transformed image is ignored (random content).
        _, bboxes, labels = shear(
            dummy_image, original_bboxes, original_labels,
            angle_deg, Interpolation.NEAREST
        )
        self.assertTrue(np.array_equal(expected_bboxes, bboxes))
        self.assertTrue(np.array_equal(expected_labels, labels))
        # TF.
        tf_expected_bboxes = tf.convert_to_tensor(
            expected_bboxes, dtype=tf.float32
        )
        tf_expected_labels = tf.convert_to_tensor(
            expected_labels, dtype=tf.float32
        )
        _, tf_bboxes, tf_labels = tf_shear(
            tf.convert_to_tensor(dummy_image, dtype=tf.float32),
            tf.convert_to_tensor(original_bboxes, dtype=tf.float32),
            tf.convert_to_tensor(original_labels, dtype=tf.float32),
            angle_deg, Interpolation.NEAREST
        )
        self.assertTrue(
            np.allclose(tf_expected_bboxes.numpy(), tf_bboxes.numpy())
        )
        self.assertTrue(
            np.allclose(tf_expected_labels.numpy(), tf_labels.numpy())
        )

    def test_translate(self) -> None:
        """Translate each sample; shifted-out regions become zeros and
        bboxes are clipped to the remaining visible area."""
        # One (height, width) shift per fixture sample.
        translate_heights = [-1, 0, 1]
        translate_widths = [0, 1, 1]
        expected_image_seq = [
            np.array([
                [[4], [5], [6]],
                [[7], [8], [9]],
                [[10], [11], [12]],
                [[0], [0], [0]]
            ], dtype=np.float32),
            np.array([
                [[0], [11], [12]],
                [[0], [14], [15]],
                [[0], [17], [18]],
                [[0], [20], [21]]
            ], dtype=np.float32),
            np.array([
                [[0], [0], [0]],
                [[0], [21], [22]],
                [[0], [24], [25]],
                [[0], [27], [28]]
            ], dtype=np.float32),
        ]
        expected_bboxes_seq = [
            np.array([
                [1, 0, 2, 1],
                [0, 0, 3, 2],
            ], dtype=np.float32),
            np.array([
                [1, 0, 2, 3],
            ], dtype=np.float32),
            np.array([], dtype=np.float32).reshape(-1, 4),
        ]
        expected_labels_seq = [
            np.array([0, 1], dtype=np.float32),
            np.array([2], dtype=np.float32),
            np.array([], dtype=np.float32),
        ]
        # NumPy.
        for i in range(len(ORIGINAL_IMAGE_SEQ)):
            image, bboxes, labels = translate(
                ORIGINAL_IMAGE_SEQ[i],
                ORIGINAL_BBOXES_SEQ[i],
                ORIGINAL_LABELS_SEQ[i],
                translate_heights[i], translate_widths[i],
                Interpolation.NEAREST
            )
            self.assertTrue(np.array_equal(expected_image_seq[i], image))
            self.assertTrue(np.array_equal(expected_bboxes_seq[i], bboxes))
            self.assertTrue(np.array_equal(expected_labels_seq[i], labels))
        # TF.
        (
            tf_expected_image_seq,
            tf_expected_bboxes_seq,
            tf_expected_labels_seq
        ) = to_tf(
            expected_image_seq, expected_bboxes_seq, expected_labels_seq
        )
        for i in range(len(TF_ORIGINAL_LABELS_SEQ)):
            tf_image, tf_bboxes, tf_labels = tf_translate(
                TF_ORIGINAL_IMAGE_SEQ[i],
                TF_ORIGINAL_BBOXES_SEQ[i],
                TF_ORIGINAL_LABELS_SEQ[i],
                translate_heights[i], translate_widths[i],
                Interpolation.NEAREST
            )
            self.assertTrue(
                np.array_equal(tf_expected_image_seq[i].numpy(),
                               tf_image.numpy())
            )
            self.assertTrue(
                np.array_equal(tf_expected_bboxes_seq[i].numpy(),
                               tf_bboxes.numpy())
            )
            self.assertTrue(
                np.array_equal(tf_expected_labels_seq[i].numpy(),
                               tf_labels.numpy())
            )

    def test_crop(self) -> None:
        """Crop 96x96 windows from 128x128 images.

        Expectations show that bboxes fully outside the crop are dropped
        together with their labels; remaining bboxes shift by the offset.
        Image pixels are random and are not checked.
        """
        dummy_image_seq = [np.random.rand(128, 128, 3) for _ in range(4)]
        original_bboxes_seq = [
            np.array([
                [64, 52, 20, 24],
                [44, 48, 12, 8],
            ], dtype=np.float32),
            np.array([
                [24, 12, 20, 24],
                [108, 120, 12, 8],
            ], dtype=np.float32),
            np.array([
                [108, 120, 12, 8],
            ], dtype=np.float32),
            np.array([], dtype=np.float32).reshape(-1, 4),
        ]
        original_labels_seq = [
            np.array([0, 1], dtype=np.float32),
            np.array([2, 1], dtype=np.float32),
            np.array([0], dtype=np.float32),
            np.array([], dtype=np.float32),
        ]
        # Per-sample crop offsets; all crops are 128 * 0.75 = 96 square.
        offset_heights = [128 * 0.25, 0.0, 0.0, 99.0]
        offset_widths = [128 * 0.25, 0.0, 0.0, 59.0]
        crop_heights = [128 * 0.75] * 4
        crop_widths = [128 * 0.75] * 4
        expected_bboxes_seq = [
            np.array([
                [32, 20, 20, 24],
                [12, 16, 12, 8],
            ], dtype=np.float32),
            np.array([
                [24, 12, 20, 24],
            ], dtype=np.float32),
            np.array([], dtype=np.float32).reshape(-1, 4),
            np.array([], dtype=np.float32).reshape(-1, 4),
        ]
        expected_labels_seq = [
            np.array([0, 1], dtype=np.float32),
            np.array([2], dtype=np.float32),
            np.array([], dtype=np.float32),
            np.array([], dtype=np.float32),
        ]
        # NumPy.
        for i in range(len(dummy_image_seq)):
            _, bboxes, labels = crop(
                dummy_image_seq[i],
                original_bboxes_seq[i],
                original_labels_seq[i],
                int(offset_heights[i]), int(offset_widths[i]),
                int(crop_heights[i]), int(crop_widths[i])
            )
            self.assertTrue(
                np.allclose(expected_bboxes_seq[i], bboxes)
            )
            self.assertTrue(
                np.allclose(expected_labels_seq[i], labels)
            )
        # TF.
        (
            tf_dummy_image_seq,
            tf_original_bboxes_seq,
            tf_original_labels_seq
        ) = to_tf(
            dummy_image_seq, original_bboxes_seq, original_labels_seq
        )
        _, tf_expected_bboxes_seq, tf_expected_labels_seq = to_tf(
            dummy_image_seq, expected_bboxes_seq, expected_labels_seq
        )
        for i in range(len(tf_dummy_image_seq)):
            _, tf_bboxes, tf_labels = tf_crop(
                tf_dummy_image_seq[i],
                tf_original_bboxes_seq[i],
                tf_original_labels_seq[i],
                int(offset_heights[i]), int(offset_widths[i]),
                int(crop_heights[i]), int(crop_widths[i])
            )
            self.assertTrue(
                np.allclose(tf_expected_bboxes_seq[i].numpy(),
                            tf_bboxes.numpy())
            )
            self.assertTrue(
                np.allclose(tf_expected_labels_seq[i].numpy(),
                            tf_labels.numpy())
            )
class TestConversion(unittest.TestCase):
    """Verify that raw sequences survive the trip into a tf.data.Dataset."""

    def test_seqs_to_tf_dataset(self) -> None:
        """Dataset samples must match the source sequences element-wise."""
        full_ds = seqs_to_tf_dataset(
            ORIGINAL_IMAGE_SEQ, ORIGINAL_BBOXES_SEQ, ORIGINAL_LABELS_SEQ
        )
        expected_triplets = zip(
            ORIGINAL_IMAGE_SEQ, ORIGINAL_BBOXES_SEQ, ORIGINAL_LABELS_SEQ
        )
        for expected, sample in zip(expected_triplets, full_ds):
            exp_image, exp_bboxes, exp_labels = expected
            image, bboxes, labels = sample
            self.assertTrue(np.allclose(exp_image, image.numpy()))
            self.assertTrue(np.allclose(exp_bboxes, bboxes.numpy()))
            self.assertTrue(np.allclose(exp_labels, labels.numpy()))

        # Image-only conversion: only the image content is checked here.
        image_only_ds = seqs_to_tf_dataset(ORIGINAL_IMAGE_SEQ, [], [])
        for exp_image, (image, _, _) in zip(ORIGINAL_IMAGE_SEQ,
                                            image_only_ds):
            self.assertTrue(np.allclose(exp_image, image.numpy()))


if __name__ == "__main__":
    unittest.main()
#!/usr/bin/env python3
"""
PyTorch Dataset test.
"""
from typing import Optional, Sequence, Tuple
import numpy as np
import numpy.typing
from torch.utils.data import Dataset, DataLoader
import targetran.np
from targetran.utils import Compose, collate_fn
# np.float64 instead of np.float_: the np.float_ alias was removed in
# NumPy 2.0 and has always meant float64.
NDAnyArray = np.typing.NDArray[np.float64]


def make_np_data() -> Tuple[Sequence[NDAnyArray],
                            Sequence[NDAnyArray],
                            Sequence[NDAnyArray]]:
    """
    Build a small random detection data set for the demo.

    Returns:
        Three parallel sequences of length 3: random 480x512 RGB images,
        per-image bounding-box arrays (the middle sample is empty), and
        per-image label arrays aligned with the boxes.
    """
    image_seq = [np.random.rand(480, 512, 3) for _ in range(3)]
    bboxes_seq = [
        np.array([
            [214, 223, 10, 11],
            [345, 230, 21, 9],
        ]),
        np.array([]),  # No boxes: exercises the empty-target path.
        np.array([
            [104, 151, 22, 10],
            [99, 132, 20, 15],
            [340, 220, 31, 12],
        ]),
    ]
    labels_seq = [
        np.array([0, 1]),
        np.array([]),
        np.array([2, 3, 0]),
    ]
    return image_seq, bboxes_seq, labels_seq
class PTDataset(Dataset):
    """Map-style Dataset over parallel image/bboxes/labels sequences.

    When `transforms` is given, each sample is passed through it on access.
    """

    def __init__(
            self,
            image_seq: Sequence[NDAnyArray],
            bboxes_seq: Sequence[NDAnyArray],
            labels_seq: Sequence[NDAnyArray],
            transforms: Optional[Compose]
    ) -> None:
        self.image_seq = image_seq
        self.bboxes_seq = bboxes_seq
        self.labels_seq = labels_seq
        self.transforms = transforms

    def __len__(self) -> int:
        return len(self.image_seq)

    def __getitem__(
            self,
            idx: int
    ) -> Tuple[NDAnyArray, NDAnyArray, NDAnyArray]:
        sample = (
            self.image_seq[idx],
            self.bboxes_seq[idx],
            self.labels_seq[idx]
        )
        if self.transforms:
            return self.transforms(*sample)
        return sample
def main() -> None:
    """Demo: run random NumPy transforms through a PyTorch Dataset/DataLoader
    and print the resulting shapes and values."""
    image_seq, bboxes_seq, labels_seq = make_np_data()
    # probability=1.0 forces every random transform to fire, so the demo
    # output always reflects the full pipeline.
    transforms = Compose([
        targetran.np.RandomRotate(probability=1.0),
        targetran.np.RandomShear(probability=1.0),
        targetran.np.RandomTranslate(probability=1.0),
        targetran.np.RandomFlipUpDown(probability=1.0),
        targetran.np.RandomFlipLeftRight(probability=1.0),
        targetran.np.RandomCrop(probability=1.0),
        targetran.np.Resize((256, 256)),
    ])
    print("-------- Random transform --------")
    ds = PTDataset(image_seq, bboxes_seq, labels_seq, transforms)
    for sample in ds:
        image, bboxes, labels = sample
        print(f"transformed image shape: {image.shape}")
        print(f"transformed bboxes shape: {bboxes.shape}")
        print(f"transformed bboxes: {bboxes.tolist()}")
        print(f"transformed labels shape: {labels.shape}")
        print(f"transformed labels: {labels.tolist()}")
        print("=========")
    print("-------- Batching --------")
    # collate_fn keeps variable-length bboxes/labels as per-sample tuples
    # instead of stacking them into one tensor.
    data_loader = DataLoader(ds, batch_size=2, collate_fn=collate_fn)
    for batch in data_loader:
        image_tuple, bboxes_tuple, labels_tuple = batch
        print(f"transformed image batch size: {len(image_tuple)}")
        print(f"transformed bboxes batch size: {len(bboxes_tuple)}")
        print(f"transformed labels batch size: {len(labels_tuple)}")
        print(f"image shapes: {[i.shape for i in image_tuple]}")
        print(f"bboxes shapes: {[b.shape for b in bboxes_tuple]}")
        print(f"labels shapes: {[l.shape for l in labels_tuple]}")
        print("=========")


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
TensorFlow Dataset test.
"""
from typing import Sequence, Tuple
import numpy as np
import numpy.typing
import targetran.tf
# np.float64 instead of np.float_: the np.float_ alias was removed in
# NumPy 2.0 and has always meant float64.
NDAnyArray = np.typing.NDArray[np.float64]


def make_np_data() -> Tuple[Sequence[NDAnyArray],
                            Sequence[NDAnyArray],
                            Sequence[NDAnyArray]]:
    """
    Build a small random detection data set for the demo.

    Returns:
        Three parallel sequences of length 3: random 480x512 RGB images,
        per-image bounding-box arrays (the middle sample is empty), and
        per-image label arrays aligned with the boxes.
    """
    image_seq = [np.random.rand(480, 512, 3) for _ in range(3)]
    bboxes_seq = [
        np.array([
            [214, 223, 10, 11],
            [345, 230, 21, 9],
        ]),
        np.array([]),  # No boxes: exercises the empty-target path.
        np.array([
            [104, 151, 22, 10],
            [99, 132, 20, 15],
            [340, 220, 31, 12],
        ]),
    ]
    labels_seq = [
        np.array([0, 1]),
        np.array([]),
        np.array([2, 3, 0]),
    ]
    return image_seq, bboxes_seq, labels_seq
def main() -> None:
    """Demo: run random TF transforms over a tf.data.Dataset and print
    shapes/values at each stage (raw, per-op, combine-affine, batched)."""
    image_seq, bboxes_seq, labels_seq = make_np_data()
    ds = targetran.tf.seqs_to_tf_dataset(image_seq, bboxes_seq, labels_seq)
    print("-------- Raw data --------")
    for sample in ds:
        image, bboxes, labels = sample
        print(f"image shape: {image.get_shape()}")
        print(f"bboxes shape: {bboxes.get_shape()}")
        print(f"labels shape: {labels.get_shape()}")
        print("=========")
    print("-------- Random transform --------")
    # probability=1.0 forces every random transform to fire.
    ds = ds \
        .map(targetran.tf.TFRandomRotate(probability=1.0)) \
        .map(targetran.tf.TFRandomShear(probability=1.0)) \
        .map(targetran.tf.TFRandomTranslate(probability=1.0)) \
        .map(targetran.tf.TFRandomFlipUpDown(probability=1.0)) \
        .map(targetran.tf.TFRandomFlipLeftRight(probability=1.0)) \
        .map(targetran.tf.TFRandomCrop(probability=1.0))
    for sample in ds:
        image, bboxes, labels = sample
        print(f"transformed image shape: {image.get_shape()}")
        print(f"transformed bboxes shape: {bboxes.get_shape()}")
        print(f"transformed bboxes: {bboxes.numpy().tolist()}")
        print(f"transformed labels shape: {labels.get_shape()}")
        print(f"transformed labels: {labels.numpy().tolist()}")
        print("=========")
    print("-------- Random transform with combine-affine --------")
    # Rebuild the dataset from raw data, then fold all affine ops into a
    # single TFCombineAffine step.
    ds = targetran.tf.seqs_to_tf_dataset(image_seq, bboxes_seq, labels_seq)
    affine_transforms = targetran.tf.TFCombineAffine([
        targetran.tf.TFRandomRotate(probability=1.0),
        targetran.tf.TFRandomShear(probability=1.0),
        targetran.tf.TFRandomTranslate(probability=1.0),
        targetran.tf.TFRandomFlipUpDown(probability=1.0),
        targetran.tf.TFRandomFlipLeftRight(probability=1.0),
    ], probability=1.0)
    ds = ds \
        .map(targetran.tf.TFRandomCrop(probability=1.0)) \
        .map(affine_transforms) \
        .map(targetran.tf.TFResize((256, 256)))
    for sample in ds:
        image, bboxes, labels = sample
        print(f"transformed image shape: {image.get_shape()}")
        print(f"transformed bboxes shape: {bboxes.get_shape()}")
        print(f"transformed bboxes: {bboxes.numpy().tolist()}")
        print(f"transformed labels shape: {labels.get_shape()}")
        print(f"transformed labels: {labels.numpy().tolist()}")
        print("=========")
    print("-------- Batching --------")
    # NaN padding so ragged bboxes/labels can be stacked into one batch.
    ds = ds.padded_batch(2, padding_values=np.nan)
    for batch in ds:
        image_batch, bboxes_batch, labels_batch = batch
        print(f"transformed image batch shape: {image_batch.get_shape()}")
        print(f"transformed bboxes batch shape: {bboxes_batch.get_shape()}")
        print(f"transformed bboxes batch: {bboxes_batch.numpy().tolist()}")
        print(f"transformed labels batch shape: {labels_batch.get_shape()}")
        print(f"transformed labels batch: {labels_batch.numpy().tolist()}")
        print("=========")


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
"""
TensorFlow Dataset timing.
"""
import os
from timeit import default_timer as timer
from typing import Iterator, Tuple
import tensorflow as tf # type: ignore
from targetran.tf import (
TFCombineAffine,
TFRandomFlipLeftRight,
TFRandomRotate,
TFRandomShear,
TFRandomCrop,
TFRandomTranslate,
TFResize,
)
# Restrict TF to the first GPU (harmless no-op when CUDA is unavailable).
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
AUTO = tf.data.AUTOTUNE
# Seeded generator so repeated timing runs draw the same random data.
rng = tf.random.Generator.from_seed(42)
def generator() -> Iterator[Tuple[tf.Tensor, tf.Tensor, tf.Tensor]]:
    """
    Yield randomly sized images with random bboxes and labels.
    """
    def rand_int(shape, low, high):
        # All draws share the module-level seeded generator `rng`.
        return rng.uniform(shape=shape, minval=low, maxval=high,
                           dtype=tf.int32)

    num_samples = 100000
    for _ in range(num_samples):
        # Draw order matters for reproducibility with the seeded rng:
        # height, width, image, num_bboxes, bboxes, labels.
        height = rand_int((), 512, 1024)
        width = rand_int((), 512, 1024)
        image = rand_int((height, width, 3), 0, 255)
        num_bboxes = rand_int((), 0, 10)
        bboxes = rand_int((num_bboxes, 4), 16, 256)
        labels = rand_int((num_bboxes,), 0, 20)
        yield image, bboxes, labels
def main() -> None:
    """Time the full transform pipeline over the random generator data,
    logging throughput every `logging_batch_size` samples."""
    ds = tf.data.Dataset.from_generator(
        generator,
        output_signature=(
            tf.TensorSpec((None, None, 3)),
            tf.TensorSpec((None, 4)),
            tf.TensorSpec((None,))
        )
    )
    # Fold all affine ops into one combined step (single warp per image).
    affine_transform = TFCombineAffine([
        TFRandomFlipLeftRight(),
        TFRandomRotate(),
        TFRandomShear(),
        TFRandomTranslate()
    ])
    ds = ds \
        .map(TFRandomCrop(), num_parallel_calls=AUTO) \
        .map(affine_transform, num_parallel_calls=AUTO) \
        .map(TFResize(dest_size=(256, 256)), num_parallel_calls=AUTO)
    logging_batch_size = 100
    count = 0
    print("Start...")
    start = timer()
    total_start = start
    for _ in ds:
        count += 1
        if count % logging_batch_size == 0:
            print(f"- Runtime for recent {logging_batch_size} samples: "
                  f"{timer() - start} s; "
                  f"total number of samples so far: {count}")
            # Reset the window timer after each log line.
            start = timer()
    print("--------------")
    print(f"Total runtime for {count} samples: {timer() - total_start}")


if __name__ == "__main__":
    main()
+5
-3
Metadata-Version: 2.1
Name: targetran
Version: 0.12.0
Version: 0.12.1
Summary: Target transformation for data augmentation in object detection

@@ -14,2 +14,4 @@ Home-page: https://github.com/bhky/targetran

License-File: LICENSE
Requires-Dist: opencv-python
Requires-Dist: numpy>=1.22.0

@@ -65,3 +67,3 @@ ![logo](logo/targetran_logo.png)

- [Image classification](#image-classification)
- [Examples](#examples)
- [Examples](#examples)
- [API](#api)

@@ -369,3 +371,3 @@

## Examples
# Examples

@@ -372,0 +374,0 @@ - [Code examples in this repository](examples)

@@ -50,3 +50,3 @@ ![logo](logo/targetran_logo.png)

- [Image classification](#image-classification)
- [Examples](#examples)
- [Examples](#examples)
- [API](#api)

@@ -354,3 +354,3 @@

## Examples
# Examples

@@ -357,0 +357,0 @@ - [Code examples in this repository](examples)

Metadata-Version: 2.1
Name: targetran
Version: 0.12.0
Version: 0.12.1
Summary: Target transformation for data augmentation in object detection

@@ -14,2 +14,4 @@ Home-page: https://github.com/bhky/targetran

License-File: LICENSE
Requires-Dist: opencv-python
Requires-Dist: numpy>=1.22.0

@@ -65,3 +67,3 @@ ![logo](logo/targetran_logo.png)

- [Image classification](#image-classification)
- [Examples](#examples)
- [Examples](#examples)
- [API](#api)

@@ -369,3 +371,3 @@

## Examples
# Examples

@@ -372,0 +374,0 @@ - [Code examples in this repository](examples)

@@ -22,2 +22,7 @@ LICENSE

targetran/utils/__init__.py
targetran/utils/_utils.py
targetran/utils/_utils.py
tests/__init__.py
tests/_test.py
tests/run_pt_dataset_test.py
tests/run_tf_dataset_test.py
tests/run_tf_dataset_timing.py

@@ -1,3 +0,3 @@

__version__ = "0.12.0"
__version__ = "0.12.1"
__author__ = "Bosco Yung"
__license__ = "MIT"

@@ -6,3 +6,3 @@ """

import cv2 # type: ignore
import cv2
import numpy as np

@@ -84,3 +84,3 @@

"""
return x[mask] # type: ignore
return x[mask]

@@ -87,0 +87,0 @@

@@ -211,3 +211,3 @@ """

def _get_mats(
def _get_mats( # type: ignore[empty-body]
self,

@@ -290,3 +290,3 @@ image: NDFloatArray,

conditions = rand_fn() < probs
indices = np.arange(len(probs), dtype=np.int32)[conditions]
indices = np.arange(len(probs), dtype=np.int32)[conditions] # type: ignore

@@ -293,0 +293,0 @@ if len(indices) > 1:

@@ -263,3 +263,3 @@ """

def _get_mats(
def _get_mats( # type: ignore[empty-body]
self,

@@ -266,0 +266,0 @@ image: tf.Tensor,

@@ -23,2 +23,9 @@ """

def __call__(self, *args: Any) -> Any:
# This is needed, otherwise *args will "unpack" an image array.
if len(args) == 1:
x = args[0]
for fn in self.fns:
x = fn(x)
return x
for fn in self.fns:

@@ -25,0 +32,0 @@ args = fn(*args)