Latest Threat Research: SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains. Details
Socket
Book a Demo | Sign in
Socket

targetran

Package Overview
Dependencies
Maintainers
1
Versions
65
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

targetran - pypi Package Compare versions

Comparing version
0.12.1
to
0.13.0
+52
-41
PKG-INFO
Metadata-Version: 2.1
Name: targetran
Version: 0.12.1
Version: 0.13.0
Summary: Target transformation for data augmentation in object detection

@@ -71,3 +71,3 @@ Home-page: https://github.com/bhky/targetran

Tested for Python 3.8, 3.9, and 3.10.
Tested for Python 3.9, 3.10, and 3.11.

@@ -159,11 +159,11 @@ The best way to install Targetran with its dependencies is from PyPI:

from targetran.tf import (
seqs_to_tf_dataset,
TFCombineAffine,
TFRandomFlipLeftRight,
TFRandomFlipUpDown,
TFRandomRotate,
TFRandomShear,
TFRandomTranslate,
TFRandomCrop,
TFResize,
to_tf_dataset,
TFCombineAffine,
TFRandomFlipLeftRight,
TFRandomFlipUpDown,
TFRandomRotate,
TFRandomShear,
TFRandomTranslate,
TFRandomCrop,
TFResize,
)

@@ -173,5 +173,10 @@

# Users can have their own way to create the Dataset, as long as for each iteration
# it returns a tuple of tensors for a single sample: (image, bboxes, labels).
ds = seqs_to_tf_dataset(image_seq, bboxes_seq, labels_seq)
# it returns a tuple of tensors for a single example: (image, bboxes, labels).
ds = to_tf_dataset(image_seq, bboxes_seq, labels_seq)
# Alternatively, users can provide a sequence of image paths instead of image tensors/arrays,
# and set `image_seq_is_paths=True`. In that case, the actual image loading will be done during
# the dataset operation (i.e., lazy-loading). This is especially useful when dealing with huge data.
ds = to_tf_dataset(image_paths, bboxes_seq, labels_seq, image_seq_is_paths=True)
# The affine transformations can be combined into one operation for better performance.

@@ -181,8 +186,8 @@ # Note that cropping and resizing are not affine and cannot be combined.

affine_transform = TFCombineAffine(
[TFRandomRotate(probability=0.8), # Probability to include each affine transformation step
TFRandomShear(probability=0.6), # can be specified, otherwise the default value is used.
TFRandomTranslate(), # Thus, the number of selected steps could vary.
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
probability=1.0 # Probability to apply this single combined transformation.
[TFRandomRotate(probability=0.8), # Probability to include each affine transformation step
TFRandomShear(probability=0.6), # can be specified, otherwise the default value is used.
TFRandomTranslate(), # Thus, the number of selected steps could vary.
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
probability=1.0 # Probability to apply this single combined transformation.
)

@@ -194,11 +199,11 @@ # Option (2):

affine_transform = TFCombineAffine(
[TFRandomRotate(), # Individual `probability` has no effect in this approach.
TFRandomShear(),
TFRandomTranslate(),
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
num_selected_transforms=2, # Only two steps from the list will be selected.
selected_probabilities=[0.5, 0.0, 0.3, 0.2, 0.0], # Must sum up to 1.0, if given.
keep_order=True, # If True, the selected steps must be performed in the given order.
probability=1.0 # Probability to apply this single combined transformation.
[TFRandomRotate(), # Individual `probability` has no effect in this approach.
TFRandomShear(),
TFRandomTranslate(),
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
num_selected_transforms=2, # Only two steps from the list will be selected.
selected_probabilities=[0.5, 0.0, 0.3, 0.2, 0.0], # Must sum up to 1.0, if given.
keep_order=True, # If True, the selected steps must be performed in the given order.
probability=1.0 # Probability to apply this single combined transformation.
)

@@ -209,6 +214,8 @@ # Please refer to the API manual for more parameter options.

auto_tune = tf.data.AUTOTUNE
ds = ds \
.map(TFRandomCrop(probability=0.5), num_parallel_calls=auto_tune) \
.map(affine_transform, num_parallel_calls=auto_tune) \
.map(TFResize((256, 256)), num_parallel_calls=auto_tune)
ds = (
ds
.map(TFRandomCrop(probability=0.5), num_parallel_calls=auto_tune)
.map(affine_transform, num_parallel_calls=auto_tune)
.map(TFResize((256, 256)), num_parallel_calls=auto_tune)
)

@@ -220,3 +227,3 @@ # In the Dataset `map` call, the parameter `num_parallel_calls` can be set to,

# Batching:
# Since the array/tensor shape of each sample could be different, conventional
# Since the array/tensor shape of each example could be different, conventional
# way of batching may not work. Users will have to consider their own use cases.

@@ -263,2 +270,4 @@ # One possibly useful way is the padded-batch.

) -> None:
# It is also possible to provide image paths instead of image arrays here,
# and load the image in __getitem__. The details are skipped in this example.
self.image_seq = image_seq

@@ -328,3 +337,3 @@ self.bboxes_seq = bboxes_seq

# Users can have their own way to create the Dataset, as long as for each iteration
# it returns a tuple of arrays for a single sample: (image, bboxes, labels).
# it returns a tuple of arrays for a single example: (image, bboxes, labels).
ds = PTDataset(image_seq, bboxes_seq, labels_seq, transforms=transforms)

@@ -335,3 +344,3 @@ ```

# In PyTorch, it is common to use a Dataset with a DataLoader, which provides
# batching functionality. However, since the array/tensor shape of each sample
# batching functionality. However, since the array/tensor shape of each example
# could be different, the default batching may not work. Targetran provides

@@ -349,3 +358,3 @@ # a `collate_fn` that helps producing batches of (image_seq, bboxes_seq, labels_seq).

also be used for image classification in which only the images are to be transformed,
e.g., given a dataset that returns `(image, label)` samples, or even only `image` samples.
e.g., given a dataset that returns `(image, label)` examples, or even only `image` examples.
The `image_only` function can be used to convert a transformation class for this purpose.

@@ -363,7 +372,9 @@

# TensorFlow.
ds = ds \
.map(image_only(TFRandomCrop())) \
.map(image_only(affine_transform)) \
.map(image_only(TFResize((256, 256)))) \
.batch(32) # Conventional batching can be used for classification setup.
ds = (
ds
.map(image_only(TFRandomCrop()))
.map(image_only(affine_transform))
.map(image_only(TFResize((256, 256))))
.batch(32) # Conventional batching can be used for classification setup.
)
```

@@ -370,0 +381,0 @@ ```python

+51
-40

@@ -55,3 +55,3 @@ ![logo](logo/targetran_logo.png)

Tested for Python 3.8, 3.9, and 3.10.
Tested for Python 3.9, 3.10, and 3.11.

@@ -143,11 +143,11 @@ The best way to install Targetran with its dependencies is from PyPI:

from targetran.tf import (
seqs_to_tf_dataset,
TFCombineAffine,
TFRandomFlipLeftRight,
TFRandomFlipUpDown,
TFRandomRotate,
TFRandomShear,
TFRandomTranslate,
TFRandomCrop,
TFResize,
to_tf_dataset,
TFCombineAffine,
TFRandomFlipLeftRight,
TFRandomFlipUpDown,
TFRandomRotate,
TFRandomShear,
TFRandomTranslate,
TFRandomCrop,
TFResize,
)

@@ -157,5 +157,10 @@

# Users can have their own way to create the Dataset, as long as for each iteration
# it returns a tuple of tensors for a single sample: (image, bboxes, labels).
ds = seqs_to_tf_dataset(image_seq, bboxes_seq, labels_seq)
# it returns a tuple of tensors for a single example: (image, bboxes, labels).
ds = to_tf_dataset(image_seq, bboxes_seq, labels_seq)
# Alternatively, users can provide a sequence of image paths instead of image tensors/arrays,
# and set `image_seq_is_paths=True`. In that case, the actual image loading will be done during
# the dataset operation (i.e., lazy-loading). This is especially useful when dealing with huge data.
ds = to_tf_dataset(image_paths, bboxes_seq, labels_seq, image_seq_is_paths=True)
# The affine transformations can be combined into one operation for better performance.

@@ -165,8 +170,8 @@ # Note that cropping and resizing are not affine and cannot be combined.

affine_transform = TFCombineAffine(
[TFRandomRotate(probability=0.8), # Probability to include each affine transformation step
TFRandomShear(probability=0.6), # can be specified, otherwise the default value is used.
TFRandomTranslate(), # Thus, the number of selected steps could vary.
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
probability=1.0 # Probability to apply this single combined transformation.
[TFRandomRotate(probability=0.8), # Probability to include each affine transformation step
TFRandomShear(probability=0.6), # can be specified, otherwise the default value is used.
TFRandomTranslate(), # Thus, the number of selected steps could vary.
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
probability=1.0 # Probability to apply this single combined transformation.
)

@@ -178,11 +183,11 @@ # Option (2):

affine_transform = TFCombineAffine(
[TFRandomRotate(), # Individual `probability` has no effect in this approach.
TFRandomShear(),
TFRandomTranslate(),
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
num_selected_transforms=2, # Only two steps from the list will be selected.
selected_probabilities=[0.5, 0.0, 0.3, 0.2, 0.0], # Must sum up to 1.0, if given.
keep_order=True, # If True, the selected steps must be performed in the given order.
probability=1.0 # Probability to apply this single combined transformation.
[TFRandomRotate(), # Individual `probability` has no effect in this approach.
TFRandomShear(),
TFRandomTranslate(),
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
num_selected_transforms=2, # Only two steps from the list will be selected.
selected_probabilities=[0.5, 0.0, 0.3, 0.2, 0.0], # Must sum up to 1.0, if given.
keep_order=True, # If True, the selected steps must be performed in the given order.
probability=1.0 # Probability to apply this single combined transformation.
)

@@ -193,6 +198,8 @@ # Please refer to the API manual for more parameter options.

auto_tune = tf.data.AUTOTUNE
ds = ds \
.map(TFRandomCrop(probability=0.5), num_parallel_calls=auto_tune) \
.map(affine_transform, num_parallel_calls=auto_tune) \
.map(TFResize((256, 256)), num_parallel_calls=auto_tune)
ds = (
ds
.map(TFRandomCrop(probability=0.5), num_parallel_calls=auto_tune)
.map(affine_transform, num_parallel_calls=auto_tune)
.map(TFResize((256, 256)), num_parallel_calls=auto_tune)
)

@@ -204,3 +211,3 @@ # In the Dataset `map` call, the parameter `num_parallel_calls` can be set to,

# Batching:
# Since the array/tensor shape of each sample could be different, conventional
# Since the array/tensor shape of each example could be different, conventional
# way of batching may not work. Users will have to consider their own use cases.

@@ -247,2 +254,4 @@ # One possibly useful way is the padded-batch.

) -> None:
# It is also possible to provide image paths instead of image arrays here,
# and load the image in __getitem__. The details are skipped in this example.
self.image_seq = image_seq

@@ -312,3 +321,3 @@ self.bboxes_seq = bboxes_seq

# Users can have their own way to create the Dataset, as long as for each iteration
# it returns a tuple of arrays for a single sample: (image, bboxes, labels).
# it returns a tuple of arrays for a single example: (image, bboxes, labels).
ds = PTDataset(image_seq, bboxes_seq, labels_seq, transforms=transforms)

@@ -319,3 +328,3 @@ ```

# In PyTorch, it is common to use a Dataset with a DataLoader, which provides
# batching functionality. However, since the array/tensor shape of each sample
# batching functionality. However, since the array/tensor shape of each example
# could be different, the default batching may not work. Targetran provides

@@ -333,3 +342,3 @@ # a `collate_fn` that helps producing batches of (image_seq, bboxes_seq, labels_seq).

also be used for image classification in which only the images are to be transformed,
e.g., given a dataset that returns `(image, label)` samples, or even only `image` samples.
e.g., given a dataset that returns `(image, label)` examples, or even only `image` examples.
The `image_only` function can be used to convert a transformation class for this purpose.

@@ -347,7 +356,9 @@

# TensorFlow.
ds = ds \
.map(image_only(TFRandomCrop())) \
.map(image_only(affine_transform)) \
.map(image_only(TFResize((256, 256)))) \
.batch(32) # Conventional batching can be used for classification setup.
ds = (
ds
.map(image_only(TFRandomCrop()))
.map(image_only(affine_transform))
.map(image_only(TFResize((256, 256))))
.batch(32) # Conventional batching can be used for classification setup.
)
```

@@ -354,0 +365,0 @@ ```python

Metadata-Version: 2.1
Name: targetran
Version: 0.12.1
Version: 0.13.0
Summary: Target transformation for data augmentation in object detection

@@ -71,3 +71,3 @@ Home-page: https://github.com/bhky/targetran

Tested for Python 3.8, 3.9, and 3.10.
Tested for Python 3.9, 3.10, and 3.11.

@@ -159,11 +159,11 @@ The best way to install Targetran with its dependencies is from PyPI:

from targetran.tf import (
seqs_to_tf_dataset,
TFCombineAffine,
TFRandomFlipLeftRight,
TFRandomFlipUpDown,
TFRandomRotate,
TFRandomShear,
TFRandomTranslate,
TFRandomCrop,
TFResize,
to_tf_dataset,
TFCombineAffine,
TFRandomFlipLeftRight,
TFRandomFlipUpDown,
TFRandomRotate,
TFRandomShear,
TFRandomTranslate,
TFRandomCrop,
TFResize,
)

@@ -173,5 +173,10 @@

# Users can have their own way to create the Dataset, as long as for each iteration
# it returns a tuple of tensors for a single sample: (image, bboxes, labels).
ds = seqs_to_tf_dataset(image_seq, bboxes_seq, labels_seq)
# it returns a tuple of tensors for a single example: (image, bboxes, labels).
ds = to_tf_dataset(image_seq, bboxes_seq, labels_seq)
# Alternatively, users can provide a sequence of image paths instead of image tensors/arrays,
# and set `image_seq_is_paths=True`. In that case, the actual image loading will be done during
# the dataset operation (i.e., lazy-loading). This is especially useful when dealing with huge data.
ds = to_tf_dataset(image_paths, bboxes_seq, labels_seq, image_seq_is_paths=True)
# The affine transformations can be combined into one operation for better performance.

@@ -181,8 +186,8 @@ # Note that cropping and resizing are not affine and cannot be combined.

affine_transform = TFCombineAffine(
[TFRandomRotate(probability=0.8), # Probability to include each affine transformation step
TFRandomShear(probability=0.6), # can be specified, otherwise the default value is used.
TFRandomTranslate(), # Thus, the number of selected steps could vary.
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
probability=1.0 # Probability to apply this single combined transformation.
[TFRandomRotate(probability=0.8), # Probability to include each affine transformation step
TFRandomShear(probability=0.6), # can be specified, otherwise the default value is used.
TFRandomTranslate(), # Thus, the number of selected steps could vary.
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
probability=1.0 # Probability to apply this single combined transformation.
)

@@ -194,11 +199,11 @@ # Option (2):

affine_transform = TFCombineAffine(
[TFRandomRotate(), # Individual `probability` has no effect in this approach.
TFRandomShear(),
TFRandomTranslate(),
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
num_selected_transforms=2, # Only two steps from the list will be selected.
selected_probabilities=[0.5, 0.0, 0.3, 0.2, 0.0], # Must sum up to 1.0, if given.
keep_order=True, # If True, the selected steps must be performed in the given order.
probability=1.0 # Probability to apply this single combined transformation.
[TFRandomRotate(), # Individual `probability` has no effect in this approach.
TFRandomShear(),
TFRandomTranslate(),
TFRandomFlipLeftRight(),
TFRandomFlipUpDown()],
num_selected_transforms=2, # Only two steps from the list will be selected.
selected_probabilities=[0.5, 0.0, 0.3, 0.2, 0.0], # Must sum up to 1.0, if given.
keep_order=True, # If True, the selected steps must be performed in the given order.
probability=1.0 # Probability to apply this single combined transformation.
)

@@ -209,6 +214,8 @@ # Please refer to the API manual for more parameter options.

auto_tune = tf.data.AUTOTUNE
ds = ds \
.map(TFRandomCrop(probability=0.5), num_parallel_calls=auto_tune) \
.map(affine_transform, num_parallel_calls=auto_tune) \
.map(TFResize((256, 256)), num_parallel_calls=auto_tune)
ds = (
ds
.map(TFRandomCrop(probability=0.5), num_parallel_calls=auto_tune)
.map(affine_transform, num_parallel_calls=auto_tune)
.map(TFResize((256, 256)), num_parallel_calls=auto_tune)
)

@@ -220,3 +227,3 @@ # In the Dataset `map` call, the parameter `num_parallel_calls` can be set to,

# Batching:
# Since the array/tensor shape of each sample could be different, conventional
# Since the array/tensor shape of each example could be different, conventional
# way of batching may not work. Users will have to consider their own use cases.

@@ -263,2 +270,4 @@ # One possibly useful way is the padded-batch.

) -> None:
# It is also possible to provide image paths instead of image arrays here,
# and load the image in __getitem__. The details are skipped in this example.
self.image_seq = image_seq

@@ -328,3 +337,3 @@ self.bboxes_seq = bboxes_seq

# Users can have their own way to create the Dataset, as long as for each iteration
# it returns a tuple of arrays for a single sample: (image, bboxes, labels).
# it returns a tuple of arrays for a single example: (image, bboxes, labels).
ds = PTDataset(image_seq, bboxes_seq, labels_seq, transforms=transforms)

@@ -335,3 +344,3 @@ ```

# In PyTorch, it is common to use a Dataset with a DataLoader, which provides
# batching functionality. However, since the array/tensor shape of each sample
# batching functionality. However, since the array/tensor shape of each example
# could be different, the default batching may not work. Targetran provides

@@ -349,3 +358,3 @@ # a `collate_fn` that helps producing batches of (image_seq, bboxes_seq, labels_seq).

also be used for image classification in which only the images are to be transformed,
e.g., given a dataset that returns `(image, label)` samples, or even only `image` samples.
e.g., given a dataset that returns `(image, label)` examples, or even only `image` examples.
The `image_only` function can be used to convert a transformation class for this purpose.

@@ -363,7 +372,9 @@

# TensorFlow.
ds = ds \
.map(image_only(TFRandomCrop())) \
.map(image_only(affine_transform)) \
.map(image_only(TFResize((256, 256)))) \
.batch(32) # Conventional batching can be used for classification setup.
ds = (
ds
.map(image_only(TFRandomCrop()))
.map(image_only(affine_transform))
.map(image_only(TFResize((256, 256))))
.batch(32) # Conventional batching can be used for classification setup.
)
```

@@ -370,0 +381,0 @@ ```python

@@ -1,3 +0,3 @@

__version__ = "0.12.1"
__version__ = "0.13.0"
__author__ = "Bosco Yung"
__license__ = "MIT"
from ._tf import (
to_tf as to_tf,
seqs_to_tf_dataset as seqs_to_tf_dataset,
to_tf_dataset as to_tf_dataset,
to_keras_cv_dict as to_keras_cv_dict,
to_keras_cv_model_input as to_keras_cv_model_input,
tf_flip_left_right as tf_flip_left_right,

@@ -5,0 +7,0 @@ tf_flip_up_down as tf_flip_up_down,

@@ -46,6 +46,13 @@ """

def load_tf_image(image_path: str) -> tf.Tensor:
return tf.image.decode_image(
tf.io.read_file(image_path), channels=3, expand_animations=False
)
def to_tf(
image_seq: Sequence[T],
bboxes_seq: Sequence[T],
labels_seq: Sequence[T]
labels_seq: Sequence[T],
image_seq_is_paths: bool = False,
) -> Tuple[Sequence[tf.Tensor], Sequence[tf.Tensor], Sequence[tf.Tensor]]:

@@ -60,3 +67,3 @@ """

tuples.append(
(_tf_convert(image),
(image if image_seq_is_paths else _tf_convert(image),
tf.reshape(_tf_convert(bboxes), (-1, 4)),

@@ -66,18 +73,29 @@ _tf_convert(labels))

tf_image_seq, tf_bboxes_seq, tf_labels_seq = tuple(zip(*tuples))
if image_seq_is_paths:
tf_image_seq = tf.convert_to_tensor(tf_image_seq, tf.string)
return tf_image_seq, tf_bboxes_seq, tf_labels_seq
def seqs_to_tf_dataset(
def to_tf_dataset(
image_seq: Sequence[T],
bboxes_seq: Sequence[T],
labels_seq: Sequence[T]
labels_seq: Sequence[T],
image_seq_is_paths: bool = False,
) -> tf.data.Dataset:
tf_image_seq, tf_bboxes_seq, tf_labels_seq = to_tf(
image_seq, bboxes_seq, labels_seq
image_seq, bboxes_seq, labels_seq, image_seq_is_paths
)
if image_seq_is_paths:
ds_image = tf.data.Dataset.from_tensor_slices(tf_image_seq)
ds_image = ds_image.map(load_tf_image)
else:
ds_image = tf.data.Dataset.from_tensor_slices(tf.ragged.stack(tf_image_seq))
# Tensors of different shapes can be included in a TF Dataset
# as ragged-tensors.
ds = tf.data.Dataset.zip((
tf.data.Dataset.from_tensor_slices(tf.ragged.stack(tf_image_seq)),
ds_image,
tf.data.Dataset.from_tensor_slices(tf.ragged.stack(tf_bboxes_seq)),

@@ -101,2 +119,36 @@ tf.data.Dataset.from_tensor_slices(tf.ragged.stack(tf_labels_seq))

def to_keras_cv_dict(
ds: tf.data.Dataset,
batch_size: Optional[int] = None,
drop_remainder: bool = True,
) -> tf.data.Dataset:
import keras_cv # type: ignore
ds = ds.map(lambda i, b, l: (i, {"boxes": b, "classes": l}))
if batch_size:
ds = ds.ragged_batch(batch_size=batch_size, drop_remainder=drop_remainder)
ds = ds.map(lambda i, d: {"images": i, "bounding_boxes": d})
return ds
def to_keras_cv_model_input(
ds: tf.data.Dataset,
max_num_bboxes: Optional[int] = None,
fill_value: int = -1,
) -> tf.data.Dataset:
import keras_cv
return ds.map(
lambda d: (
d["images"],
keras_cv.bounding_box.to_dense(
d["bounding_boxes"],
max_boxes=max_num_bboxes,
default_value=fill_value
)
)
)
def _tf_get_affine_dependency() -> _AffineDependency:

@@ -103,0 +155,0 @@ return _AffineDependency(

@@ -19,3 +19,3 @@ """

to_tf,
seqs_to_tf_dataset,
to_tf_dataset,
tf_flip_left_right,

@@ -564,4 +564,4 @@ tf_flip_up_down,

def test_seqs_to_tf_dataset(self) -> None:
ds = seqs_to_tf_dataset(
def test_to_tf_dataset(self) -> None:
ds = to_tf_dataset(
ORIGINAL_IMAGE_SEQ, ORIGINAL_BBOXES_SEQ, ORIGINAL_LABELS_SEQ

@@ -580,3 +580,3 @@ )

ds_image_only = seqs_to_tf_dataset(
ds_image_only = to_tf_dataset(
ORIGINAL_IMAGE_SEQ, [], []

@@ -583,0 +583,0 @@ )

@@ -95,4 +95,4 @@ #!/usr/bin/env python3

for sample in ds:
image, bboxes, labels = sample
for example in ds:
image, bboxes, labels = example
print(f"transformed image shape: {image.shape}")

@@ -99,0 +99,0 @@ print(f"transformed bboxes shape: {bboxes.shape}")

@@ -45,8 +45,8 @@ #!/usr/bin/env python3

ds = targetran.tf.seqs_to_tf_dataset(image_seq, bboxes_seq, labels_seq)
ds = targetran.tf.to_tf_dataset(image_seq, bboxes_seq, labels_seq)
print("-------- Raw data --------")
for sample in ds:
image, bboxes, labels = sample
for example in ds:
image, bboxes, labels = example
print(f"image shape: {image.get_shape()}")

@@ -67,4 +67,4 @@ print(f"bboxes shape: {bboxes.get_shape()}")

for sample in ds:
image, bboxes, labels = sample
for example in ds:
image, bboxes, labels = example
print(f"transformed image shape: {image.get_shape()}")

@@ -79,3 +79,3 @@ print(f"transformed bboxes shape: {bboxes.get_shape()}")

ds = targetran.tf.seqs_to_tf_dataset(image_seq, bboxes_seq, labels_seq)
ds = targetran.tf.to_tf_dataset(image_seq, bboxes_seq, labels_seq)

@@ -95,4 +95,4 @@ affine_transforms = targetran.tf.TFCombineAffine([

for sample in ds:
image, bboxes, labels = sample
for example in ds:
image, bboxes, labels = example
print(f"transformed image shape: {image.get_shape()}")

@@ -99,0 +99,0 @@ print(f"transformed bboxes shape: {bboxes.get_shape()}")

@@ -82,8 +82,8 @@ #!/usr/bin/env python3

if count % logging_batch_size == 0:
print(f"- Runtime for recent {logging_batch_size} samples: "
print(f"- Runtime for recent {logging_batch_size} examples: "
f"{timer() - start} s; "
f"total number of samples so far: {count}")
f"total number of examples so far: {count}")
start = timer()
print("--------------")
print(f"Total runtime for {count} samples: {timer() - total_start}")
print(f"Total runtime for {count} examples: {timer() - total_start}")

@@ -90,0 +90,0 @@