Commit 04f1733a authored by Oleh Astappiev

chore: rename map function

parent 6f761cb4
@@ -11,7 +11,7 @@ from src.model.siamese import SiameseModel
 model_name = 'imagenette_alexnet'
 embeddings_name = model_name + '_embeddings'
-train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, preprocess_fn=AlexNetModel.preprocess_input)
+train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, map_fn=AlexNetModel.preprocess_input)
 comb_ds = train_ds.concatenate(val_ds).concatenate(test_ds)
 # create model
...
@@ -11,7 +11,7 @@ from src.model.siamese import SiameseModel
 model_name = 'cifar10_alexnet'
 embeddings_name = model_name + '_embeddings'
-train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, preprocess_fn=AlexNetModel.preprocess_input)
+train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, map_fn=AlexNetModel.preprocess_input)
 comb_ds = train_ds.concatenate(val_ds).concatenate(test_ds)
 # create model
...
@@ -7,7 +7,7 @@ NUM_CLASSES = 10
 CLASS_NAMES = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
-def load_dataset(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, preprocess_fn=None):
+def load_dataset(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, map_fn=None):
     train_ds = tf.keras.utils.image_dataset_from_directory(
         directory='../datasets/cifar10/train/',
         labels='inferred',
@@ -27,15 +27,15 @@ def load_dataset(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, preprocess_fn=None):
         interpolation='nearest'
     )
-    if preprocess_fn is not None:
-        train_ds = train_ds.map(preprocess_fn).prefetch(tf.data.AUTOTUNE)
-        test_ds = test_ds.map(preprocess_fn).prefetch(tf.data.AUTOTUNE)
+    if map_fn is not None:
+        train_ds = train_ds.map(map_fn).prefetch(tf.data.AUTOTUNE)
+        test_ds = test_ds.map(map_fn).prefetch(tf.data.AUTOTUNE)
     return train_ds, test_ds
-def load_dataset3(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, preprocess_fn=None):
-    train_ds, test_ds = load_dataset(image_size=image_size, batch_size=batch_size, preprocess_fn=preprocess_fn)
+def load_dataset3(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, map_fn=None):
+    train_ds, test_ds = load_dataset(image_size=image_size, batch_size=batch_size, map_fn=map_fn)
     train_ds_size = tf.data.experimental.cardinality(train_ds).numpy()
     train_ds = train_ds.skip(train_ds_size / 10)
...
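For context, the renamed map_fn is applied as an ordinary tf.data map stage. A minimal self-contained sketch of the pattern (the directory, label settings, and the example map function below are placeholders for illustration, not taken from this commit):

    import tensorflow as tf

    def load_dataset(image_size=(64, 64), batch_size=32, map_fn=None):
        # Build a labeled, batched image dataset (placeholder directory).
        ds = tf.keras.utils.image_dataset_from_directory(
            directory='../datasets/cifar10/train/',
            labels='inferred',
            image_size=image_size,
            batch_size=batch_size,
            interpolation='nearest',
        )
        if map_fn is not None:
            # map_fn receives exactly what the dataset yields: (images, labels) batches.
            ds = ds.map(map_fn).prefetch(tf.data.AUTOTUNE)
        return ds

    # Hypothetical map function with a matching (images, labels) signature;
    # the scripts in this commit pass a model's preprocess_input here instead.
    def scale(images, labels):
        return tf.cast(images, tf.float32) / 255.0, labels

    train_ds = load_dataset(map_fn=scale)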
@@ -7,7 +7,7 @@ NUM_CLASSES = 10
 CLASS_NAMES = ['fish', 'dog', 'player', 'saw', 'building', 'music', 'truck', 'gas', 'ball', 'parachute']
-def load_dataset(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, preprocess_fn=None):
+def load_dataset(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, map_fn=None):
     train_ds = tf.keras.utils.image_dataset_from_directory(
         directory='../datasets/imagenette2/train/',
         labels='inferred',
@@ -27,15 +27,15 @@ def load_dataset(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, preprocess_fn=None):
         interpolation='nearest'
     )
-    if preprocess_fn is not None:
-        train_ds = train_ds.map(preprocess_fn).prefetch(tf.data.AUTOTUNE)
-        test_ds = test_ds.map(preprocess_fn).prefetch(tf.data.AUTOTUNE)
+    if map_fn is not None:
+        train_ds = train_ds.map(map_fn).prefetch(tf.data.AUTOTUNE)
+        test_ds = test_ds.map(map_fn).prefetch(tf.data.AUTOTUNE)
     return train_ds, test_ds
-def load_dataset3(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, preprocess_fn=None):
-    train_ds, test_ds = load_dataset(image_size=image_size, batch_size=batch_size, preprocess_fn=preprocess_fn)
+def load_dataset3(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, map_fn=None):
+    train_ds, test_ds = load_dataset(image_size=image_size, batch_size=batch_size, map_fn=map_fn)
     test_ds_size = tf.data.experimental.cardinality(test_ds).numpy()
     val_ds = test_ds.take(test_ds_size / 2)
...
@@ -7,7 +7,7 @@ NUM_CLASSES = 3
 CLASS_NAMES = ['building', 'dog', 'player']
-def load_dataset(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, preprocess_fn=None):
+def load_dataset(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, map_fn=None):
     ds = tf.keras.utils.image_dataset_from_directory(
         directory='../datasets/simple3/',
         labels='inferred',
@@ -17,14 +17,14 @@ def load_dataset(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, preprocess_fn=None):
         interpolation='nearest'
     )
-    if preprocess_fn is not None:
-        ds = ds.map(preprocess_fn).prefetch(tf.data.AUTOTUNE)
+    if map_fn is not None:
+        ds = ds.map(map_fn).prefetch(tf.data.AUTOTUNE)
     return ds
-def load_dataset3(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, preprocess_fn=None):
-    ds = load_dataset(image_size=image_size, batch_size=batch_size, preprocess_fn=preprocess_fn)
+def load_dataset3(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, map_fn=None):
+    ds = load_dataset(image_size=image_size, batch_size=batch_size, map_fn=map_fn)
     ds_size = tf.data.experimental.cardinality(ds).numpy()
     train_ds = ds.take(ds_size * 0.6)
...
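One aside on the unchanged split logic visible above: counts such as ds_size * 0.6 or train_ds_size / 10 are fractional, while tf.data documents take/skip counts as integers. A hypothetical variant with explicit casts (a sketch, not part of this commit):

    import tensorflow as tf

    def split_dataset3(ds, train_frac=0.6, val_frac=0.2):
        # Hypothetical helper: 60/20/20 split by whole batch counts.
        n = tf.data.experimental.cardinality(ds).numpy()
        n_train = int(n * train_frac)
        n_val = int(n * val_frac)
        train_ds = ds.take(n_train)
        val_ds = ds.skip(n_train).take(n_val)
        test_ds = ds.skip(n_train + n_val)
        return train_ds, val_ds, test_ds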
@@ -10,7 +10,7 @@ from src.model.siamese import SiameseModel
 model_name = 'cifar10_efficientnet'
 embeddings_name = model_name + '_embeddings'
-train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, batch_size=BATCH_SIZE, preprocess_fn=EfficientNetModel.preprocess_input)
+train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, batch_size=BATCH_SIZE, map_fn=EfficientNetModel.preprocess_input)
 comb_ds = train_ds.concatenate(val_ds).concatenate(test_ds)
 model = EfficientNetModel()
...
@@ -8,24 +8,23 @@ from src.utils.common import get_modeldir
 from src.model.mobilenet import MobileNetModel, PRETRAIN_EPOCHS, TARGET_SHAPE, EMBEDDING_VECTOR_DIMENSION
 from src.model.siamese import SiameseModel
-model_name = 'imagenet_mobilenet'
+model_name = 'imagenette_mobilenet'
 embeddings_name = model_name + '_embeddings'
-train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, preprocess_fn=MobileNetModel.preprocess_input)
+train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, map_fn=MobileNetModel.preprocess_input)
 comb_ds = train_ds.concatenate(val_ds).concatenate(test_ds)
 PRETRAIN_TOTAL_STEPS = PRETRAIN_EPOCHS * len(train_ds)
-# create model
 model = MobileNetModel()
 model.compile(optimizer=tf.keras.optimizers.RMSprop(tf.keras.optimizers.schedules.CosineDecay(1e-3, PRETRAIN_TOTAL_STEPS)))
 model.summary()
 # load weights
-model.load_weights(get_modeldir(model_name + '.h5'))
+# model.load_weights(get_modeldir(model_name + '.h5'))
 # train & save model
-# model.fit(train_ds, epochs=PRETRAIN_EPOCHS, validation_data=val_ds)
-# model.save_weights(get_modeldir(model_name + '.h5'))
+model.fit(train_ds, epochs=PRETRAIN_EPOCHS, validation_data=val_ds)
+model.save_weights(get_modeldir(model_name + '.h5'))
 # evaluate
 print('evaluating...')
...
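This hunk flips the script from loading previously saved weights back to training and saving them. If the intent is to do whichever applies, a hypothetical guard (not part of the commit; get_modeldir and the '.h5' naming come from the repo's own code above) would be:

    from os import path

    weights_file = get_modeldir(model_name + '.h5')
    if path.exists(weights_file):
        # Reuse previously trained weights when available.
        model.load_weights(weights_file)
    else:
        # Otherwise pretrain and persist the result.
        model.fit(train_ds, epochs=PRETRAIN_EPOCHS, validation_data=val_ds)
        model.save_weights(weights_file)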
@@ -11,7 +11,7 @@ from src.model.siamese import SiameseModel
 model_name = 'cifar10_mobilenet'
 embeddings_name = model_name + '_embeddings'
-train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, preprocess_fn=MobileNetModel.preprocess_input)
+train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, map_fn=MobileNetModel.preprocess_input)
 comb_ds = train_ds.concatenate(val_ds).concatenate(test_ds)
 PRETRAIN_TOTAL_STEPS = PRETRAIN_EPOCHS * len(train_ds)
@@ -55,6 +55,6 @@ inference_model.save(get_modeldir(model_name + '_inference.tf'))
 print('visualization')
-# compute embeddings of the images and their labels, store them in a tsv file for visualization
+# compute vectors of the images and their labels, store them in a tsv file for visualization
 siamese_vectors, siamese_labels = calc_vectors(comb_ds, inference_model)
 project_embeddings(siamese_vectors, siamese_labels, model_name + '_siamese')
+import time
 import tensorflow as tf
 from os import path, curdir
-import time
 def normalize_image(image):
...
@@ -11,7 +11,7 @@ from src.model.siamese import SiameseModel
 model_name = 'imagenet_vgg16'
 embeddings_name = model_name + '_embeddings'
-train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, preprocess_fn=VGG16Model.preprocess_input)
+train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, map_fn=VGG16Model.preprocess_input)
 comb_ds = train_ds.concatenate(val_ds).concatenate(test_ds)
 PRETRAIN_TOTAL_STEPS = PRETRAIN_EPOCHS * len(train_ds)
...
@@ -13,7 +13,7 @@ embeddings_name = model_name + '_embeddings'
 TARGET_SHAPE = (32, 32)
-train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, preprocess_fn=VGG16Model.preprocess_input)
+train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, map_fn=VGG16Model.preprocess_input)
 comb_ds = train_ds.concatenate(val_ds).concatenate(test_ds)
 PRETRAIN_TOTAL_STEPS = PRETRAIN_EPOCHS * len(train_ds)
...
@@ -13,7 +13,7 @@ embeddings_name = model_name + '_embeddings'
 TARGET_SHAPE = (32, 32)
-train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, preprocess_fn=VGG16Model.preprocess_input)
+train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, map_fn=VGG16Model.preprocess_input)
 comb_ds = train_ds.concatenate(val_ds).concatenate(test_ds)
 PRETRAIN_TOTAL_STEPS = PRETRAIN_EPOCHS * len(train_ds)
...
@@ -11,7 +11,7 @@ from src.model.siamese import SiameseModel
 model_name = 'cifar10_vit'
 embeddings_name = model_name + '_embeddings'
-train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, batch_size=BATCH_SIZE, preprocess_fn=VitModel.preprocess_input)
+train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, batch_size=BATCH_SIZE, map_fn=VitModel.preprocess_input)
 comb_ds = train_ds.concatenate(val_ds).concatenate(test_ds)
 model = VitModel()
@@ -24,7 +24,7 @@ save_embeddings(emb_vectors, emb_labels, embeddings_name)
 # emb_vectors, emb_labels = load_embeddings(embeddings_name)
 # siamese is the model we train
-siamese = SiameseModel(embedding_vector_dimension=384, image_vector_dimensions=512)
+siamese = SiameseModel(embedding_vector_dimension=384, image_vector_dimensions=3)
 siamese.compile(loss_margin=0.05, optimizer=tf.keras.optimizers.Adam(learning_rate=0.001))
 siamese.summary()
...
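image_vector_dimensions drops from 512 to 3 in this hunk, presumably so the projected vectors can be inspected directly in three dimensions by the visualization step. Conceptually the projection is a dense layer over the 384-d embedding, along these lines (a sketch under assumptions, not the repo's SiameseModel):

    import tensorflow as tf

    # Sketch: project a 384-d embedding down to a 3-d image vector (names assumed).
    embedding_in = tf.keras.Input(shape=(384,))
    image_vector = tf.keras.layers.Dense(3)(embedding_in)
    projection = tf.keras.Model(embedding_in, image_vector)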