Commit 8c2437e2 authored by Oleh Astappiev

feat: finally, working Siamese network

parent 6921f81c
 numpy~=1.22.3
-tensorflow~=2.8.0
+tensorflow~=2.9.0
-tensorboard~=2.8.0
+tensorboard~=2.9.0
-matplotlib~=3.5.1
+matplotlib~=3.5.2
 opencv-python~=4.5.5.64
+tqdm~=4.64.0
+tensorflow-addons~=0.16.1
+scipy~=1.8.1
@@ -56,7 +56,7 @@ NUM_EPOCHS = 3
 TRAIN_BATCH_SIZE = 128
 STEPS_PER_EPOCH = 1000
-ds = SiameseModel.prepare_dataset(embeddings, embedding_labels).batch(TRAIN_BATCH_SIZE)  # .prefetch(tf.data.AUTOTUNE)
+ds = SiameseModel.prepare_dataset(embeddings, embedding_labels)
 history = siamese.fit(ds, epochs=NUM_EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, class_weight={0: 1 / NUM_CLASSES, 1: (NUM_CLASSES - 1) / NUM_CLASSES})
 # Build full inference model (from image to image vector):
...
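The class_weight passed to fit() counterbalances the pair distribution: among pairs drawn uniformly from NUM_CLASSES classes, only about 1/NUM_CLASSES are same-class. A minimal sketch, not part of the commit and assuming label 1 marks the rare same-class pairs (the tensorflow-addons convention), of why the two weights balance in expectation:

NUM_CLASSES = 3
class_weight = {0: 1 / NUM_CLASSES, 1: (NUM_CLASSES - 1) / NUM_CLASSES}
# expected fraction of pairs per label under uniform sampling (assumption:
# label 1 = same-class pair, which is the rare case)
p = {0: (NUM_CLASSES - 1) / NUM_CLASSES,  # different-class pairs (common)
     1: 1 / NUM_CLASSES}                  # same-class pairs (rare)
# each label then contributes the same expected weight mass to the loss
assert abs(p[0] * class_weight[0] - p[1] * class_weight[1]) < 1e-12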
 import tensorflow as tf
-BATCH_SIZE = 32
+BATCH_SIZE = 6
 IMAGE_SIZE = (400, 320)
 NUM_CLASSES = 3
@@ -27,12 +27,13 @@ def load_dataset3(image_size=IMAGE_SIZE, batch_size=BATCH_SIZE, preprocess_fn=No
     ds = load_dataset(image_size=image_size, batch_size=batch_size, preprocess_fn=preprocess_fn)
     ds_size = tf.data.experimental.cardinality(ds).numpy()
-    train_ds = ds.take(ds_size * 0.7)
-    val_ds = ds.skip(ds_size * 0.7).take(ds_size * 0.15)
-    test_ds = ds.skip(ds_size * 0.7).skip(ds_size * 0.15)
+    train_ds = ds.take(ds_size * 0.6)
+    val_ds = ds.skip(ds_size * 0.6).take(ds_size * 0.2)
+    test_ds = ds.skip(ds_size * 0.6).skip(ds_size * 0.2)
     if True:
         print("Simple 3 dataset loaded")
+        print("Total dataset size:", ds_size)
         print("Training data size:", tf.data.experimental.cardinality(train_ds).numpy())
         print("Validation data size:", tf.data.experimental.cardinality(val_ds).numpy())
         print("Evaluation data size:", tf.data.experimental.cardinality(test_ds).numpy())
...
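For reference, tf.data.Dataset.take and skip expect integer counts, while ds_size * 0.6 above is a float. A minimal sketch, not part of the commit, of the same 60/20/20 split with explicit integer casts:

import tensorflow as tf

def split_dataset(ds, train_frac=0.6, val_frac=0.2):
    # cardinality counts elements (here: batches, since load_dataset batches)
    ds_size = int(tf.data.experimental.cardinality(ds).numpy())
    n_train = int(ds_size * train_frac)
    n_val = int(ds_size * val_frac)
    train_ds = ds.take(n_train)
    val_ds = ds.skip(n_train).take(n_val)
    test_ds = ds.skip(n_train + n_val)  # the remaining ~20%
    return train_ds, val_ds, test_ds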
@@ -81,7 +81,7 @@ NUM_EPOCHS = 50
 TRAIN_BATCH_SIZE = 128
 STEPS_PER_EPOCH = 2000
-ds = SiameseModel.prepare_dataset(embeddings, labels).batch(TRAIN_BATCH_SIZE)  # .prefetch(tf.data.AUTOTUNE)
+ds = SiameseModel.prepare_dataset(embeddings, labels)
 history = siamese.fit(
     ds,
     epochs=NUM_EPOCHS,
...
@@ -57,7 +57,7 @@ NUM_EPOCHS = 3
 TRAIN_BATCH_SIZE = 128
 STEPS_PER_EPOCH = 1000
-ds = SiameseModel.prepare_dataset(embeddings, embedding_labels).batch(TRAIN_BATCH_SIZE)  # .prefetch(tf.data.AUTOTUNE)
+ds = SiameseModel.prepare_dataset(embeddings, embedding_labels)
 history = siamese.fit(
     ds,
     epochs=NUM_EPOCHS,
...
@@ -57,7 +57,7 @@ TRAIN_BATCH_SIZE = 128
 STEPS_PER_EPOCH = 1000
-ds = SiameseModel.prepare_dataset(embeddings, embedding_labels).batch(TRAIN_BATCH_SIZE)  # .prefetch(tf.data.AUTOTUNE)
+ds = SiameseModel.prepare_dataset(embeddings, embedding_labels)
 history = siamese.fit(
     ds,
     epochs=NUM_EPOCHS,
...
@@ -10,7 +10,7 @@ TARGET_SHAPE = (227, 227)
 PRETRAIN_EPOCHS = 50
 class AlexNetModel(Sequential):
-    def __init__(self):
+    def __init__(self, classes=10):
         super(AlexNetModel, self).__init__([
             layers.Conv2D(filters=96, kernel_size=(11, 11), strides=(4, 4), activation='relu', input_shape=TARGET_SHAPE + (3,)),
             layers.BatchNormalization(),
@@ -39,7 +39,7 @@ class AlexNetModel(Sequential):
             layers.Dense(4096, activation='relu'),
             layers.Dropout(rate=0.5),
-            layers.Dense(name='unfreeze', units=10, activation='softmax')
+            layers.Dense(name='unfreeze', units=classes, activation='softmax')
         ])
     def compile(self,
...
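With the head parameterized, the same backbone serves datasets with different label counts; the new simple3 training script below passes NUM_CLASSES = 3. A short usage sketch:

model = AlexNetModel()           # 10-way softmax head, the previous hard-coded default
model = AlexNetModel(classes=3)  # 3-way head, as instantiated by the simple3 script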
 from src.utils.common import *
-from src.utils.distance import cosine_distance, ContrastiveLoss
+import tensorflow_addons as tfa
+from src.utils.distance import cosine_distance, euclidean_distance
 from tensorflow.keras import layers, callbacks, Model
 tensorboard_cb = callbacks.TensorBoard(get_logdir('siamese/fit'))
@@ -7,11 +8,11 @@ tensorboard_cb = callbacks.TensorBoard(get_logdir('siamese/fit'))
 EMBEDDING_VECTOR_DIMENSION = 4096
 IMAGE_VECTOR_DIMENSIONS = 3  # use for test visualization on tensorboard
 ACTIVATION_FN = 'tanh'  # same as in paper
-MARGIN = 0.05
+MARGIN = 0.5
-TRAIN_BATCH_SIZE = 128
-STEPS_PER_EPOCH = 1000
 NUM_EPOCHS = 3
+TRAIN_BATCH_SIZE = 128
+STEPS_PER_EPOCH = 100  # 1000
 @tf.function
@@ -45,12 +46,14 @@ class SiameseModel(Model):
         self.projection_model = tf.keras.models.Sequential([
             # layers.Dense(image_vector_dimensions, activation=ACTIVATION_FN, input_shape=(embedding_vector_dimension,))
             layers.Dense(128, activation='relu', input_shape=(embedding_vector_dimension,)),
-            layers.Dense(image_vector_dimensions, activation=None)
+            layers.Dense(image_vector_dimensions, activation=None),
+            layers.Lambda(lambda x: tf.keras.backend.l2_normalize(x, axis=1)),
         ])
         v1 = self.projection_model(emb_input_1)
         v2 = self.projection_model(emb_input_2)
         computed_distance = layers.Lambda(cosine_distance)([v1, v2])
+        # computed_distance = layers.Lambda(euclidean_distance)([v1, v2])
         super(SiameseModel, self).__init__(inputs=[emb_input_1, emb_input_2], outputs=computed_distance)
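The added Lambda layer L2-normalizes the projection output, so all image vectors land on the unit sphere; there, the cosine distance used above and the commented-out euclidean_distance are monotonically related and rank pairs identically. A minimal numpy sketch, not part of the commit, of the identity ||a - b||^2 = 2 * (1 - cos(a, b)) for unit vectors:

import numpy as np

rng = np.random.default_rng(0)
a, b = rng.normal(size=(2, 4096))
a /= np.linalg.norm(a)  # mimics the l2_normalize Lambda
b /= np.linalg.norm(b)
cosine_dist = 1.0 - a @ b            # what cosine_distance computes
sq_euclidean = np.sum((a - b) ** 2)  # squared euclidean_distance
assert np.isclose(sq_euclidean, 2.0 * cosine_dist)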
@@ -68,9 +71,8 @@ class SiameseModel(Model):
     def compile(self,
                 optimizer=tf.keras.optimizers.RMSprop(),
                 loss_margin=MARGIN,
-                metrics=['accuracy'],
                 **kwargs):
-        super().compile(optimizer=optimizer, loss=ContrastiveLoss(margin=loss_margin), metrics=metrics, **kwargs)
+        super().compile(optimizer=optimizer, loss=tfa.losses.ContrastiveLoss(margin=loss_margin), **kwargs)
     def fit(self, x=None, y=None, batch_size=None, epochs=NUM_EPOCHS, steps_per_epoch=STEPS_PER_EPOCH, callbacks=[tensorboard_cb], **kwargs):
         return super().fit(x=x, y=y, batch_size=batch_size, epochs=epochs, steps_per_epoch=steps_per_epoch, callbacks=callbacks, **kwargs)
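tfa.losses.ContrastiveLoss replaces the handwritten helper from src.utils.distance; per the tensorflow-addons documentation it expects binary labels (1 for similar pairs, 0 for dissimilar) and predicted distances, computing y * d^2 + (1 - y) * max(margin - d, 0)^2. A small usage sketch with illustrative values:

import tensorflow as tf
import tensorflow_addons as tfa

loss_fn = tfa.losses.ContrastiveLoss(margin=0.5)  # MARGIN above
y_true = tf.constant([1.0, 0.0])  # similar pair, dissimilar pair
y_pred = tf.constant([0.1, 0.2])  # predicted distances
# mean of 0.1**2 and max(0.5 - 0.2, 0)**2 = mean(0.01, 0.09) = 0.05
print(loss_fn(y_true, y_pred).numpy())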
@@ -100,4 +102,5 @@ class SiameseModel(Model):
         # resample to the desired distribution
         # train_ds = train_ds.rejection_resample(lambda embs, target: tf.cast(target, tf.int32), [0.5, 0.5], initial_dist=[0.9, 0.1])
         # train_ds = train_ds.map(lambda _, vals: vals)  # discard the prepended "selected" class from the rejection resample, since we already have it available
+        train_ds = train_ds.batch(TRAIN_BATCH_SIZE)  # .prefetch(tf.data.AUTOTUNE)
         return train_ds
import sys
sys.path.append("..")
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy.spatial import distance_matrix
from src.data.simple3 import load_dataset3, NUM_CLASSES
from src.utils.embeddings import project_embeddings, calc_vectors
from src.utils.common import get_modeldir
from src.model.alexnet import AlexNetModel, TARGET_SHAPE
from src.model.siamese import SiameseModel
# tf.config.run_functions_eagerly(True)
model_name = 'simple3_alexnet'
embeddings_name = model_name + '_embeddings'
train_ds, val_ds, test_ds = load_dataset3(image_size=TARGET_SHAPE, preprocess_fn=AlexNetModel.preprocess_input)
comb_ds = train_ds.concatenate(val_ds).concatenate(test_ds)
# create model
model = AlexNetModel(NUM_CLASSES)
model.compile()
model.summary()
# load weights
model.load_weights(get_modeldir(model_name + '.h5'))
# train & save model
# model.fit(train_ds, validation_data=val_ds)
# model.save_weights(get_modeldir(model_name + '.h5'))
# evaluate
# print('evaluating...')
# model.evaluate(test_ds)
# alexnet_vectors, alexnet_labels = calc_vectors(comb_ds, model)
# project_embeddings(alexnet_vectors, alexnet_labels, model_name + '_alexnet')
for layer in model.layers:
layer.trainable = False
print('calculating embeddings...')
embedding_model = tf.keras.Model(inputs=model.input, outputs=model.layers[-2].output)
embedding_model.summary()
emb_vectors, emb_labels = calc_vectors(comb_ds, embedding_model)
project_embeddings(emb_vectors, emb_labels, model_name + '_emb')
# siamese is the model we train
siamese = SiameseModel(embedding_vector_dimension=4096, image_vector_dimensions=3)
siamese.compile(loss_margin=0.1) # TODO: experiment with high value, e.g. 2
siamese.summary()
ds = SiameseModel.prepare_dataset(emb_vectors, emb_labels)
history = siamese.fit(ds, class_weight={0: 1 / NUM_CLASSES, 1: (NUM_CLASSES - 1) / NUM_CLASSES})
# Build full inference model (from image to image vector):
inference_model = siamese.get_inference_model(embedding_model)
inference_model.save(get_modeldir(model_name + '_inference.tf'), save_format='tf', include_optimizer=False)
# inference_model = tf.keras.models.load_model(get_modeldir(model_name + '_inference.tf'), compile=False)
print('visualization')
# compute vectors of the images and their labels, store them in a tsv file for visualization
siamese_vectors, siamese_labels = calc_vectors(comb_ds, inference_model)
project_embeddings(siamese_vectors, siamese_labels, model_name + '_siamese')
dstm = distance_matrix(siamese_vectors, siamese_vectors)
print('Under the margin', (dstm < 0.1).sum() / 2)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
_pts = np.random.uniform(size=[500, 3], low=-1, high=1)
_pts = _pts / np.linalg.norm(_pts, axis=-1)[:, None]
ax.scatter(_pts[:, 0], _pts[:, 1], _pts[:, 2])
ax.scatter(siamese_vectors[:, 0], siamese_vectors[:, 1], siamese_vectors[:, 2])
ax.set_xlim([-1,1])
ax.set_ylim([-1,1])
ax.set_zlim([-1,1])
ax.invert_zaxis()
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
\ No newline at end of file
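One caveat on the count printed above: dstm's diagonal is all zeros, so (dstm < 0.1).sum() also includes the N self-distances and the figure is inflated by N/2. A sketch, not in the commit, that counts only distinct pairs under the margin:

n = dstm.shape[0]
pairs_under_margin = ((dstm < 0.1).sum() - n) // 2  # subtract the zero diagonal first
print('Under the margin (distinct pairs)', pairs_under_margin)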
@@ -2,8 +2,6 @@ import tensorflow as tf
 from tensorflow.keras import layers, backend
-# Provided two tensors t1 and t2
-# Euclidean distance = sqrt(sum(square(t1-t2)))
 def euclidean_distance(vects):
     """Find the Euclidean distance between two vectors.
@@ -15,6 +13,8 @@ def euclidean_distance(vects):
     (as floating point value) between vectors.
     """
+    # Provided two tensors t1 and t2
+    # Euclidean distance = sqrt(sum(square(t1-t2)))
     x, y = vects
     sum_square = tf.math.reduce_sum(tf.math.square(x - y), axis=1, keepdims=True)
     return tf.math.sqrt(tf.math.maximum(sum_square, backend.epsilon()))
@@ -35,45 +35,3 @@ def cosine_distance(vects):
     # 1 is perpendicular vectors and 2 is opposite vectors
     cosine_similarity = layers.Dot(axes=1, normalize=True)(vects)
     return 1 - cosine_similarity
-"""
-### Contrastive Loss
-$Loss = Y \cdot D(v_1, v_2)^2 + (1 - Y) \cdot \max(margin - D, 0)^2$
-$Y$ is the ground-truth target (1 if $v_1$ and $v_2$ belong to the same class, 0 otherwise). If the images are from the same class, the squared distance is the loss (the distance should be pushed towards 0 for same-class couples); otherwise the loss is the squared maximum of 0 and $margin - D$.
-For different-class couples, the distance should be pushed towards a high value. The **margin identifies a cone inside which vectors are considered the same**. For cosine distance, which has range [0, 2], **1 is NOT an adequate value**.
-**NOTE** In the loss implementation below we take the mean of the two terms, though this is not strictly necessary (the minimizer of the loss is the same whether or not it is divided by 2).
-"""
-def ContrastiveLoss(margin=1):
-    """Provides 'contrastive_loss' an enclosing scope with variable 'margin'.
-    Arguments:
-        margin: Integer, defines the baseline for distance for which pairs
-                should be classified as dissimilar. - (default is 1).
-    Returns:
-        'contrastive_loss' function with data ('margin') attached.
-    """
-    # Contrastive loss = mean( (1-true_value) * square(prediction) +
-    #                          true_value * square( max(margin-prediction, 0) ))
-    def contrastive_loss(y_true, y_pred):
-        """Calculates the contrastive loss.
-        Arguments:
-            y_true: List of labels (1 for same-class pair, 0 for different-class), fp32.
-            y_pred: List of predicted distances, fp32.
-        Returns:
-            A tensor containing contrastive loss as floating point value.
-        """
-        square_dist = tf.math.square(y_pred)
-        margin_square = tf.math.square(tf.math.maximum(margin - y_pred, 0))
-        return tf.math.reduce_mean((1 - y_true) * square_dist + y_true * margin_square)
-    return contrastive_loss
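A quick numeric check of the documented cosine_distance range, not part of the commit, using the same Dot(axes=1, normalize=True) layer:

import tensorflow as tf
from tensorflow.keras import layers

# cosine_distance should be 0 for parallel, 1 for perpendicular, 2 for opposite vectors
x = tf.constant([[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]])
y = tf.constant([[2.0, 0.0], [0.0, 3.0], [-1.0, 0.0]])
print((1 - layers.Dot(axes=1, normalize=True)([x, y])).numpy().ravel())  # ~[0. 1. 2.]

Plugging numbers into the removed docstring's formula makes the margin's role concrete: with margin = 0.5, a different-class pair at distance D = 0.3 contributes max(0.5 - 0.3, 0)^2 = 0.04 and contributes nothing once D >= 0.5, while a same-class pair at D = 0.3 contributes 0.3^2 = 0.09. Note that the removed implementation applied the squared-distance term to y_true = 0 and the margin term to y_true = 1, the opposite of its own docstring and of tfa.losses.ContrastiveLoss, which this commit switches to.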
@@ -3,6 +3,7 @@ import time
 import _pickle as pickle
 import numpy as np
 import tensorflow as tf
+from tqdm import tqdm
 from pathlib import Path
 from tensorboard.plugins import projector
 from google.protobuf import text_format
@@ -35,6 +36,33 @@ def export_embeddings(values, labels, name='embeddings'):
         writer.writerow([i, label_str, value_str])
+def calc_vectors(ds, model):
+    ds_vectors = []
+    ds_labels = []
+    for ds_batch in tqdm(ds):
+        images, labels = ds_batch
+        predictions = model(images)
+        ds_vectors.extend(predictions.numpy().tolist())
+        ds_labels.extend(labels.numpy().tolist())
+    return np.array(ds_vectors), np.array(ds_labels)
+def evaluate_vectors(values, labels):
+    total = len(values)
+    match = 0
+    mismatch = 0
+    for i, expected in enumerate(labels):
+        pred = np.argmax(values[i])
+        if expected == pred:
+            match += 1
+        else:
+            mismatch += 1
+    return match / total
 def project_embeddings(image_vectors, labels, name='projection'):
     root_dir = Path(get_logdir_root())
     projection_name = name + '_' + str(time.strftime('%Y_%m_%d-%H_%M_%S'))
...
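The new evaluate_vectors treats values as per-class score rows (e.g. softmax outputs), so that np.argmax is comparable with integer labels; it fits classifier outputs rather than the siamese embeddings. A hedged usage sketch with made-up scores:

import numpy as np

scores = np.array([[0.8, 0.1, 0.1],   # argmax 0
                   [0.2, 0.7, 0.1]])  # argmax 1
labels = np.array([0, 2])
print(evaluate_vectors(scores, labels))  # 0.5: one of the two argmaxes matches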
@@ -57,7 +57,7 @@ NUM_EPOCHS = 3
 TRAIN_BATCH_SIZE = 128
 STEPS_PER_EPOCH = 1000
-ds = SiameseModel.prepare_dataset(embeddings, embedding_labels).batch(TRAIN_BATCH_SIZE)  # .prefetch(tf.data.AUTOTUNE)
+ds = SiameseModel.prepare_dataset(embeddings, embedding_labels)
 history = siamese.fit(
     ds,
     epochs=NUM_EPOCHS,
...
@@ -59,7 +59,7 @@ NUM_EPOCHS = 3
 TRAIN_BATCH_SIZE = 128
 STEPS_PER_EPOCH = 1000
-ds = SiameseModel.prepare_dataset(embeddings, embedding_labels).batch(TRAIN_BATCH_SIZE)  # .prefetch(tf.data.AUTOTUNE)
+ds = SiameseModel.prepare_dataset(embeddings, embedding_labels)
 history = siamese.fit(
     ds,
     epochs=NUM_EPOCHS,
...
@@ -68,7 +68,7 @@ NUM_EPOCHS = 3
 TRAIN_BATCH_SIZE = 128
 STEPS_PER_EPOCH = 1000
-ds = SiameseModel.prepare_dataset(embeddings, embedding_labels).batch(TRAIN_BATCH_SIZE)  # .prefetch(tf.data.AUTOTUNE)
+ds = SiameseModel.prepare_dataset(embeddings, embedding_labels)
 history = siamese.fit(
     ds,
     epochs=NUM_EPOCHS,
...
@@ -91,7 +91,7 @@ NUM_EPOCHS = 10
 TRAIN_BATCH_SIZE = 128
 STEPS_PER_EPOCH = 3000
-ds = SiameseModel.prepare_dataset(embeddings, labels).batch(TRAIN_BATCH_SIZE)  # .prefetch(tf.data.AUTOTUNE)
+ds = SiameseModel.prepare_dataset(embeddings, labels)
 history = siamese.fit(
     ds,
     epochs=NUM_EPOCHS,
...