Merge branch 'master' into mlpmixer
soma2000-lang authored Feb 27, 2023
2 parents 3abe7d9 + 54fcdf2 commit 9c01ece
Showing 300 changed files with 7,877 additions and 2,308 deletions.
86 changes: 50 additions & 36 deletions README.md
@@ -31,59 +31,73 @@ To learn more about the future project direction, please check the [roadmap](.github/ROADMAP.md).
 - [Roadmap](.github/ROADMAP.md)
 - [API Design Guidelines](.github/API_DESIGN.md)
 
-## Quickstart
+## Installation
 
 To install the latest official release:
 
 ```
 pip install keras-cv tensorflow --upgrade
 ```
 
-Create a preprocessing pipeline:
+To install the latest unreleased changes to the library, we recommend using
+pip to install directly from the master branch on github:
+
+```
+pip install git+https://github.com/keras-team/keras-cv.git tensorflow --upgrade
+```
+
+## Quickstart
 
 ```python
 import keras_cv
 import tensorflow as tf
 from tensorflow import keras
 import tensorflow_datasets as tfds
 
+# Create a preprocessing pipeline
 augmenter = keras_cv.layers.Augmenter(
     layers=[
         keras_cv.layers.RandomFlip(),
         keras_cv.layers.RandAugment(value_range=(0, 255)),
         keras_cv.layers.CutMix(),
         keras_cv.layers.MixUp()
     ]
 )
 
-def augment_data(images, labels):
-    labels = tf.one_hot(labels, 3)
-    inputs = {"images": images, "labels": labels}
-    outputs = augmenter(inputs)
-    return outputs['images'], outputs['labels']
-```
-
-Augment a `tf.data.Dataset`:
-
-```python
-dataset = tfds.load('rock_paper_scissors', as_supervised=True, split='train')
-dataset = dataset.batch(64)
-dataset = dataset.map(augment_data, num_parallel_calls=tf.data.AUTOTUNE)
-```
-
-Create a model:
-
-```python
+def preprocess_data(images, labels, augment=False):
+    labels = tf.one_hot(labels, 3)
+    inputs = {"images": images, "labels": labels}
+    outputs = augmenter(inputs) if augment else inputs
+    return outputs['images'], outputs['labels']
+
+# Augment a `tf.data.Dataset`
+train_dataset, test_dataset = tfds.load(
+    'rock_paper_scissors',
+    as_supervised=True,
+    split=['train', 'test'],
+)
+train_dataset = train_dataset.batch(16).map(
+    lambda x, y: preprocess_data(x, y, augment=True),
+    num_parallel_calls=tf.data.AUTOTUNE).prefetch(
+        tf.data.AUTOTUNE)
+test_dataset = test_dataset.batch(16).map(
+    preprocess_data, num_parallel_calls=tf.data.AUTOTUNE).prefetch(
+        tf.data.AUTOTUNE)
 
+# Create a model
 densenet = keras_cv.models.DenseNet121(
     include_rescaling=True,
     include_top=True,
     classes=3
 )
 densenet.compile(
     loss='categorical_crossentropy',
     optimizer='adam',
     metrics=['accuracy']
 )
-```
-
-Train your model:
-
-```python
-densenet.fit(dataset)
+
+# Train your model
+densenet.fit(train_dataset, validation_data=test_dataset)
 ```
 
 ## Contributors
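The key behavioral change in this README hunk is that augmentation now runs only on the training split, via the `augment=False` default in `preprocess_data`. As a minimal sketch of what the `Augmenter` pipeline does to one batch (the shapes, batch size, and single-layer pipeline below are illustrative choices of mine, not taken from the diff):

```python
import tensorflow as tf
import keras_cv

# A one-layer pipeline; the README version also adds RandAugment, CutMix, MixUp.
augmenter = keras_cv.layers.Augmenter(
    layers=[keras_cv.layers.RandomFlip()]
)

# The Augmenter consumes a dict with "images" and one-hot "labels",
# matching the inputs dict built by preprocess_data above.
batch = {
    "images": tf.random.uniform((8, 64, 64, 3), maxval=255.0),
    "labels": tf.one_hot(tf.zeros(8, dtype=tf.int32), 3),
}
out = augmenter(batch)
print(out["images"].shape)  # (8, 64, 64, 3): augmentation preserves shape
```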
@@ -35,10 +35,7 @@ def produce_random_data(include_confidence=False, num_images=128, classes=20):
         )
     )
 
-    images = [
-        keras_cv.bounding_box.pad_batch_to_shape(x, [25, images[0].shape[1]])
-        for x in images
-    ]
+    images = [keras_cv.bounding_box.to_dense(x, max_boxes=25) for x in images]
     return tf.stack(images, axis=0)
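This swap from `pad_batch_to_shape` to `bounding_box.to_dense` tracks KerasCV's newer bounding-box utilities: `to_dense` pads each image's box set out to `max_boxes` slots. In the benchmark the mapped `x` values are per-image tensors; the dictionary form below is a hedged sketch of the public usage as I understand it, and is my assumption rather than something shown in this diff:

```python
import tensorflow as tf
import keras_cv

# Hypothetical ragged boxes for a 2-image batch (1 box, then 2 boxes).
bounding_boxes = {
    "boxes": tf.ragged.constant(
        [[[0, 0, 10, 10]], [[2, 2, 4, 4], [0, 0, 1, 1]]],
        ragged_rank=1,
        dtype=tf.float32,
    ),
    "classes": tf.ragged.constant([[0], [1, 2]], dtype=tf.float32),
}

# Every image now carries exactly 25 box slots; unused slots are padding.
dense = keras_cv.bounding_box.to_dense(bounding_boxes, max_boxes=25)
```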
7 changes: 2 additions & 5 deletions benchmarks/metrics/coco/recall_performance.py
@@ -35,10 +35,7 @@ def produce_random_data(include_confidence=False, num_images=128, classes=20):
         )
     )
 
-    images = [
-        keras_cv.bounding_box.pad_batch_to_shape(x, [25, images[0].shape[1]])
-        for x in images
-    ]
+    images = [keras_cv.bounding_box.to_dense(x, max_boxes=25) for x in images]
     return tf.stack(images, axis=0)
 
 
@@ -56,7 +53,7 @@ def produce_random_data(include_confidence=False, num_images=128, classes=20):
 for images in n_images:
     y_true = produce_random_data(num_images=images)
     y_pred = produce_random_data(num_images=images, include_confidence=True)
-    metric = coco._COCORecall(class_ids)
+    metric = coco._BoxRecall(class_ids)
     # warm up
     metric.update_state(y_true, y_pred)
     metric.result()
4 changes: 3 additions & 1 deletion benchmarks/vectorization_strategy_benchmark.py
@@ -72,7 +72,9 @@ def single_rectangle_mask(corners, mask_shape):
     return masks
 
 
-def fill_single_rectangle(image, centers_x, centers_y, widths, heights, fill_values):
+def fill_single_rectangle(
+    image, centers_x, centers_y, widths, heights, fill_values
+):
     """Fill rectangles with fill value into images.
     Args:
176 changes: 176 additions & 0 deletions benchmarks/vectorized_auto_contrast.py
@@ -0,0 +1,176 @@
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time

import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras

from keras_cv.layers import AutoContrast
from keras_cv.layers.preprocessing.base_image_augmentation_layer import (
    BaseImageAugmentationLayer,
)
from keras_cv.utils import preprocessing


class OldAutoContrast(BaseImageAugmentationLayer):
    """Performs the AutoContrast operation on an image.
    Auto contrast stretches the values of an image across the entire available
    `value_range`. This makes differences between pixels more obvious. An example of
    this is if an image only has values `[0, 1]` out of the range `[0, 255]`, auto
    contrast will change the `1` values to be `255`.
    Args:
        value_range: the range of values the incoming images will have.
            Represented as a two number tuple written [low, high].
            This is typically either `[0, 1]` or `[0, 255]` depending
            on how your preprocessing pipeline is setup.
    """

    def __init__(
        self,
        value_range,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.value_range = value_range

    def augment_image(self, image, transformation=None, **kwargs):
        original_image = image
        image = preprocessing.transform_value_range(
            image,
            original_range=self.value_range,
            target_range=(0, 255),
            dtype=self.compute_dtype,
        )

        low = tf.reduce_min(tf.reduce_min(image, axis=0), axis=0)
        high = tf.reduce_max(tf.reduce_max(image, axis=0), axis=0)
        scale = 255.0 / (high - low)
        offset = -low * scale

        image = image * scale[None, None] + offset[None, None]
        result = tf.clip_by_value(image, 0.0, 255.0)
        result = preprocessing.transform_value_range(
            result,
            original_range=(0, 255),
            target_range=self.value_range,
            dtype=self.compute_dtype,
        )
        # don't process NaN channels
        result = tf.where(tf.math.is_nan(result), original_image, result)
        return result

    def augment_bounding_boxes(self, bounding_boxes, **kwargs):
        return bounding_boxes

    def augment_label(self, label, transformation=None, **kwargs):
        return label

    def augment_segmentation_mask(
        self, segmentation_mask, transformation, **kwargs
    ):
        return segmentation_mask

    def get_config(self):
        config = super().get_config()
        config.update({"value_range": self.value_range})
        return config


class AutoContrastConsistencyTest(tf.test.TestCase):
    def test_consistency_with_old_implementation(self):
        images = tf.random.uniform(shape=(16, 32, 32, 3))

        output = AutoContrast(value_range=(0, 1))(images)
        old_output = OldAutoContrast(value_range=(0, 1))(images)

        self.assertAllClose(old_output, output)


if __name__ == "__main__":
    (x_train, _), _ = keras.datasets.cifar10.load_data()
    x_train = x_train.astype(float)

    images = []
    num_images = [1000, 2000, 5000, 10000]
    results = {}

    for aug in [AutoContrast, OldAutoContrast]:
        c = aug.__name__

        layer = aug(value_range=(0, 255))

        runtimes = []
        print(f"Timing {c}")

        for n_images in num_images:
            # warmup
            layer(x_train[:n_images])

            t0 = time.time()
            r1 = layer(x_train[:n_images])
            t1 = time.time()
            runtimes.append(t1 - t0)
            print(f"Runtime for {c}, n_images={n_images}: {t1 - t0}")

        results[c] = runtimes

        c = aug.__name__ + " Graph Mode"

        layer = aug(value_range=(0, 255))

        @tf.function()
        def apply_aug(inputs):
            return layer(inputs)

        runtimes = []
        print(f"Timing {c}")

        for n_images in num_images:
            # warmup
            apply_aug(x_train[:n_images])

            t0 = time.time()
            r1 = apply_aug(x_train[:n_images])
            t1 = time.time()
            runtimes.append(t1 - t0)
            print(f"Runtime for {c}, n_images={n_images}: {t1 - t0}")

        results[c] = runtimes

    plt.figure()
    for key in results:
        plt.plot(num_images, results[key], label=key)
    plt.xlabel("Number images")

    plt.ylabel("Runtime (seconds)")
    plt.legend()
    plt.show()

    # So we can actually see more relevant margins
    del results["OldAutoContrast"]

    plt.figure()
    for key in results:
        plt.plot(num_images, results[key], label=key)
    plt.xlabel("Number images")

    plt.ylabel("Runtime (seconds)")
    plt.legend()
    plt.show()

    # Compare two implementations
    tf.test.main()
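The docstring's claim ("values `[0, 1]` out of the range `[0, 255]` become `[0, 255]`") is easy to verify numerically. A minimal sketch of the same scale/offset arithmetic both layers implement, with hand-picked toy values (not part of the benchmark):

```python
import tensorflow as tf

# Toy 1x2x2x1 image whose values only span [0, 1] of a [0, 255] range.
image = tf.constant([[[[0.0], [1.0]], [[1.0], [0.0]]]])

low = tf.reduce_min(image)    # 0.0
high = tf.reduce_max(image)   # 1.0
scale = 255.0 / (high - low)  # 255.0
offset = -low * scale         # 0.0

# Stretch, then clip exactly as augment_image does.
stretched = tf.clip_by_value(image * scale + offset, 0.0, 255.0)
print(tf.reduce_max(stretched).numpy())  # 255.0, i.e. the 1s became 255s
```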