add deepfake
RocketFlash committed Mar 18, 2020
1 parent 18a786c commit 5650968
Showing 10 changed files with 513 additions and 219 deletions.
24 changes: 12 additions & 12 deletions configs/bengali.yml
@@ -3,8 +3,8 @@ MODEL:
   encodings_len: 256
   mode : 'triplet'
   distance_type : 'l1'
-  backbone_name : 'efficientnet-b0'
-  backbone_weights : 'imagenet'
+  backbone_name : 'efficientnet-b5'
+  backbone_weights : 'noisy-student'
   freeze_backbone : False
   embeddings_normalization: True

@@ -19,16 +19,16 @@ DATALOADER:
 GENERATOR:
   negatives_selection_mode : 'semihard'
   k_classes: 3
-  k_samples: 5
-  margin: 0.5
-  batch_size : 10
-  n_batches : 10
-  augmentations : 'default'
+  k_samples: 3
+  margin: 0.3
+  batch_size : 8
+  n_batches : 500
+  augmentations : 'none'

 TRAIN:
   # optimizer parameters
   optimizer : 'radam'
-  learning_rate : 0.0001
+  learning_rate : 0.00001
   decay_factor : 0.99
   step_size : 1

@@ -45,13 +45,13 @@ TRAIN:
 # decay_factor : 0.99
 # step_size : 1

-# batch_size : 8
+# batch_size : 16
 # val_steps : 200
-# steps_per_epoch : 10
-# n_epochs : 1
+# steps_per_epoch : 1000
+# n_epochs : 50

 SAVE_PATHS:
-  project_name : 'bengali_efficientnet'
+  project_name : 'bengali_efn_b5'
   work_dir : 'work_dirs/'

 ENCODINGS:
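For reference, mode : 'triplet' with distance_type : 'l1' trains a margin-based triplet loss, so lowering margin from 0.5 to 0.3 directly relaxes the separation this objective demands. A minimal sketch of the loss on NumPy embedding vectors (a hypothetical helper for illustration, not code from this repo):

import numpy as np

def triplet_loss(anchor, positive, negative, margin=0.3):
    # L1 embedding distances, matching distance_type : 'l1'
    d_ap = np.sum(np.abs(anchor - positive))
    d_an = np.sum(np.abs(anchor - negative))
    # hinge: zero loss once the negative sits at least `margin`
    # farther from the anchor than the positive does
    return max(d_ap - d_an + margin, 0.0)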
62 changes: 62 additions & 0 deletions configs/deepfake.yml
@@ -0,0 +1,62 @@
MODEL:
  input_shape : [224, 224, 3]
  encodings_len: 256
  mode : 'triplet'
  distance_type : 'l1'
  backbone_name : 'efficientnet-b3'
  backbone_weights : 'noisy-student'
  freeze_backbone : False
  embeddings_normalization: True

DATALOADER:
  dataset_path : '/home/rauf/datasets/aaaa/deepfake/'
  csv_file :
  image_id_column :
  label_column :
  validate : True
  val_ratio : 0.2

GENERATOR:
  negatives_selection_mode : 'hardest'
  k_classes: 2
  k_samples: 3
  margin: 0.5
  batch_size : 8
  n_batches : 7000
  augmentations : 'deepfake'

TRAIN:
  # optimizer parameters
  optimizer : 'radam'
  learning_rate : 0.000016
  decay_factor : 0.99
  step_size : 1

  # embeddings learning training parameters
  n_epochs : 1000

  # plot training history
  plot_history : True

# SOFTMAX_PRETRAINING:
# # softmax pretraining parameters
# optimizer : 'radam'
# learning_rate : 0.0001
# decay_factor : 0.99
# step_size : 1

# batch_size : 16
# val_steps : 200
# steps_per_epoch : 1000
# n_epochs : 50

SAVE_PATHS:
  project_name : 'deepfake_efn_b3'
  work_dir : 'work_dirs/'

ENCODINGS:
  # encodings parameters
  save_encodings : True
  centers_only: False
  max_num_samples_of_each_class : 30
  knn_k : 1
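A minimal sketch of reading this new config with PyYAML (an illustration; the repo's own config loader may differ). Note that keys left empty above, such as csv_file, parse as None:

import yaml

with open('configs/deepfake.yml') as f:
    cfg = yaml.safe_load(f)

print(cfg['MODEL']['backbone_name'])   # 'efficientnet-b3'
print(cfg['GENERATOR']['n_batches'])   # 7000
print(cfg['DATALOADER']['csv_file'])   # None (left empty above)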
59 changes: 59 additions & 0 deletions configs/road_signs.yml
@@ -0,0 +1,59 @@
MODEL:
  input_shape : [48, 48, 3]
  encodings_len: 256
  mode : 'triplet'
  distance_type : 'l1'
  backbone_name : 'efficientnet-b0'
  backbone_weights : 'imagenet'
  freeze_backbone : False
  embeddings_normalization: True

DATALOADER:
  dataset_path : '/home/rauf/datasets/road_signs/road_signs_separated/train/'
  validate : True
  val_ratio : 0.2

GENERATOR:
  negatives_selection_mode : 'semihard'
  k_classes: 2
  k_samples: 3
  margin: 0.5
  batch_size : 1
  n_batches : 1
  augmentations : 'none'

TRAIN:
  # optimizer parameters
  optimizer : 'adam'
  learning_rate : 0.0001
  decay_factor : 0.99
  step_size : 1

  # embeddings learning training parameters
  n_epochs : 1000

  # plot training history
  plot_history : True

SOFTMAX_PRETRAINING:
  # softmax pretraining parameters
  optimizer : 'radam'
  learning_rate : 0.0001
  decay_factor : 0.99
  step_size : 1

  batch_size : 8
  val_steps : 200
  steps_per_epoch : 10
  n_epochs : 1

SAVE_PATHS:
  project_name : 'road_signs_efficientnet'
  work_dir : 'work_dirs/'

ENCODINGS:
  # encodings parameters
  save_encodings : True
  centers_only: False
  max_num_samples_of_each_class : 30
  knn_k : 1
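Unlike the two configs above, road_signs.yml leaves SOFTMAX_PRETRAINING active (uncommented), so these values feed the pretrain_backbone_softmax function added to embedding_net/backbones.py below. A hedged sketch of that wiring; the function also expects input_shape and augmentations in params_softmax, which the trainer presumably merges in from the MODEL and GENERATOR sections (the exact plumbing is an assumption, not shown in this diff):

# hypothetical wiring, not code from this repo
params_softmax = {
    'optimizer': 'radam', 'learning_rate': 0.0001,
    'decay_factor': 0.99, 'step_size': 1,
    'batch_size': 8, 'val_steps': 200,
    'steps_per_epoch': 10, 'n_epochs': 1,
    # assumed to come from the MODEL / GENERATOR sections:
    'input_shape': [48, 48, 3], 'augmentations': 'none',
}
params_save_paths = {'work_dir': 'work_dirs/',
                     'project_name': 'road_signs_efficientnet'}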
4 changes: 4 additions & 0 deletions embedding_net/augmentations.py
@@ -27,6 +27,10 @@ def get_aug(name='default', input_shape=[48, 48, 3]):
             A.GaussNoise(var_limit=(50, 80), p=0.3),
             A.RandomCrop(p=0.8, height=2*input_shape[1]/3, width=2*input_shape[0]/3)
         ], p=1)
+    elif name == 'deepfake':
+        augmentations = A.Compose([
+            A.HorizontalFlip(p=0.5),
+        ], p=1)
     elif name == 'plates2':
         augmentations = A.Compose([
             A.CLAHE(clip_limit=(1,4),p=0.3),
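The new 'deepfake' pipeline applies only a horizontal flip with probability 0.5. A usage sketch on a dummy image, using albumentations' standard dict-based call convention (illustration only):

import numpy as np
from embedding_net.augmentations import get_aug

aug = get_aug(name='deepfake', input_shape=[224, 224, 3])
image = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)
augmented = aug(image=image)['image']  # Compose returns a dict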
88 changes: 87 additions & 1 deletion embedding_net/backbones.py
Expand Up @@ -3,7 +3,11 @@
 from tensorflow.keras.models import Model
 from tensorflow.keras.regularizers import l2
 import tensorflow.keras.backend as K
-
+from tensorflow.keras.callbacks import TensorBoard, LearningRateScheduler
+from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
+from .datagenerators import SimpleDataGenerator
+import os
+import numpy as np

 def get_backbone(input_shape,
                  encodings_len=4096,
@@ -117,4 +121,86 @@ def get_backbone(input_shape,
     base_model = Model(
         inputs=[backbone_model.input], outputs=[encoded_output])

+    base_model._make_predict_function()
+
     return base_model, backbone_model


def pretrain_backbone_softmax(backbone_model, data_loader, params_softmax, params_save_paths):

    optimizer = params_softmax['optimizer']
    learning_rate = params_softmax['learning_rate']
    decay_factor = params_softmax['decay_factor']
    step_size = params_softmax['step_size']

    input_shape = params_softmax['input_shape']
    batch_size = params_softmax['batch_size']
    val_steps = params_softmax['val_steps']
    steps_per_epoch = params_softmax['steps_per_epoch']
    n_epochs = params_softmax['n_epochs']
    augmentations = params_softmax['augmentations']

    n_classes = data_loader.n_classes

    # classification head on top of the backbone for softmax pretraining
    x = GlobalAveragePooling2D()(backbone_model.output)
    output = Dense(n_classes, activation='softmax')(x)
    model = Model(inputs=[backbone_model.input], outputs=[output])

    # train
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    train_generator = SimpleDataGenerator(data_loader.train_data,
                                          data_loader.class_names,
                                          input_shape=input_shape,
                                          batch_size=batch_size,
                                          n_batches=steps_per_epoch,
                                          augmentations=augmentations)

    if data_loader.validate:
        val_generator = SimpleDataGenerator(data_loader.val_data,
                                            data_loader.class_names,
                                            input_shape=input_shape,
                                            batch_size=batch_size,
                                            n_batches=steps_per_epoch,
                                            augmentations=augmentations)
        checkpoint_callback_monitor = 'val_loss'
    else:
        val_generator = None
        checkpoint_callback_monitor = 'loss'

    tensorboard_save_path = os.path.join(
        params_save_paths['work_dir'],
        params_save_paths['project_name'],
        'pretraining_model/tf_log/')
    # note: the {val_acc} placeholder assumes validation is enabled
    weights_save_file = os.path.join(
        params_save_paths['work_dir'],
        params_save_paths['project_name'],
        'pretraining_model/weights/',
        params_save_paths['project_name'] + '_{epoch:03d}_{val_acc:03f}' + '.h5')

    callbacks = [
        # stepwise exponential learning rate decay
        LearningRateScheduler(lambda x: learning_rate *
                              decay_factor ** np.floor(x / step_size)),
        ReduceLROnPlateau(monitor=checkpoint_callback_monitor, factor=0.1,
                          patience=20, verbose=1),
        EarlyStopping(monitor=checkpoint_callback_monitor,
                      patience=10,
                      verbose=1,
                      restore_best_weights=True),
        # TensorBoard(log_dir=tensorboard_save_path),
        ModelCheckpoint(filepath=weights_save_file,
                        verbose=1,
                        monitor=checkpoint_callback_monitor,
                        save_best_only=True)]
    # checkpoints_load_name = 'work_dirs/bengali_efficientnet/pretraining_model/weights/bengali_efficientnet_020_0.932969.h5'
    # model.load_weights(checkpoints_load_name, by_name=True)
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=n_epochs,
                                  verbose=1,
                                  validation_data=val_generator,
                                  validation_steps=val_steps,
                                  callbacks=callbacks)
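The LearningRateScheduler lambda above implements stepwise exponential decay: lr(epoch) = learning_rate * decay_factor ** floor(epoch / step_size). A standalone check with the values used in configs/road_signs.yml:

import numpy as np

learning_rate, decay_factor, step_size = 0.0001, 0.99, 1
for epoch in (0, 1, 10, 100):
    lr = learning_rate * decay_factor ** np.floor(epoch / step_size)
    print(epoch, lr)  # 1e-04, 9.9e-05, ~9.04e-05, ~3.66e-05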