-
Notifications
You must be signed in to change notification settings - Fork 6
/
BlindMI_Diff_W.py
74 lines (63 loc) · 3.63 KB
/
BlindMI_Diff_W.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
from BlindMIUtil import *
from dataLoader import *
import tensorflow as tf
from tensorflow.keras.models import load_model
import sys
# --- Script setup: GPU selection, CLI arguments, data and target model ---
# Pin the process to GPU index 1 and enable on-demand memory growth so
# TensorFlow does not reserve the whole card at startup.
# NOTE(review): `os` is not imported here — presumably re-exported by one of
# the star imports above (BlindMIUtil / dataLoader); confirm.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
tf.config.experimental.set_memory_growth(tf.config.experimental.list_physical_devices('GPU')[0], True)
# Dataset name and target architecture from the command line, with defaults:
#   python BlindMI_Diff_W.py [DATA_NAME] [TARGET_MODEL_GENRE]
DATA_NAME = sys.argv[1] if len(sys.argv) > 1 else "CIFAR"
TARGET_MODEL_GENRE = sys.argv[2] if len(sys.argv) > 2 else "ResNet50"
# Pretrained target-model weights, e.g. weights/Target/CIFAR_ResNet50.hdf5.
TARGET_WEIGHTS_PATH = "weights/Target/{}_{}.hdf5".format(DATA_NAME, TARGET_MODEL_GENRE)
# Dynamically dispatch to a loader named load_<DATA_NAME> (defined in the
# star-imported dataLoader module, presumably); it returns the target model's
# train split, test split, and the ground-truth membership vector m_true.
(x_train_tar, y_train_tar), (x_test_tar, y_test_tar), m_true = globals()['load_' + DATA_NAME]('TargetModel')
Target_Model = load_model(TARGET_WEIGHTS_PATH)
def diff_Mem_attack(x_, y_true, m_true, target_model, non_Mem_Generator=sobel):
    '''
    Attack the target with BLINDMI-DIFF-W, i.e. BLINDMI-DIFF with a generated
    non-member set.
    The non-members are generated from 20 randomly chosen samples by default.
    If the data has been shuffled, please directly remove the process of shuffling.
    :param target_model: the model that will be attacked
    :param x_: the data that the target model may have used for training
    :param y_true: the (one-hot, presumably — indexing below uses it as a boolean
        mask over class columns; confirm) label of x_
    :param m_true: 0/1 per sample, whether each element of x_ was in training
    :param non_Mem_Generator: the method used to generate non-member data.
        The default non-member generator is Sobel (project helper).
    :return: Tensor/ndarray tuple of results (m_true, m_pred, shuffled features, nonMem)
    '''
    # Feature construction: for every sample keep (prob. of the true class,
    # top-1 prob., top-2 prob.) from the target model's softmax output.
    y_pred = target_model.predict(x_)
    mix = np.c_[y_pred[y_true.astype(bool)], np.sort(y_pred, axis=1)[:, ::-1][:, :2]]
    # Reference non-member set: 20 random samples transformed by the
    # non-member generator, then passed through the target model.
    nonMem_index = np.random.randint(0, x_.shape[0], size=20)
    nonMem_pred = target_model.predict(non_Mem_Generator(x_[nonMem_index]))
    nonMem = tf.convert_to_tensor(np.c_[nonMem_pred[y_true[nonMem_index].astype(bool)],
                                        np.sort(nonMem_pred, axis=1)[:, ::-1][:, :2]])
    # Shuffle the mixed set and process it in batches of 20.
    data = tf.data.Dataset.from_tensor_slices((mix, m_true)).shuffle(buffer_size=x_.shape[0]).\
        batch(20).prefetch(tf.data.experimental.AUTOTUNE)
    # NOTE: the parameter m_true is deliberately shadowed here; ground truth is
    # re-collected batch-by-batch in shuffled order alongside the predictions.
    m_pred, m_true = [], []
    mix_shuffled = []
    for (mix_batch, m_true_batch) in data:
        # Start by assuming every sample in the batch is a member (1).
        m_pred_batch = np.ones(mix_batch.shape[0])   # labels at the start of the current pass
        m_pred_epoch = np.ones(mix_batch.shape[0])   # labels being updated during the pass
        nonMemInMix = True
        # Iterate until a full pass moves no sample: one-by-one, tentatively move
        # each still-member sample to the non-member side and keep the move iff it
        # increases the MMD between the two sets (i.e. sharpens the separation).
        while nonMemInMix:
            mix_epoch_new = mix_batch[m_pred_epoch.astype(bool)]
            # Baseline distance for this pass; note it is computed once per pass,
            # not refreshed after each accepted move within the pass.
            dis_ori = mmd_loss(nonMem, mix_epoch_new, weight=1)
            nonMemInMix = False
            for index, item in tqdm(enumerate(mix_batch)):
                if m_pred_batch[index] == 1:
                    # Candidate split with sample `index` moved to the non-member set.
                    nonMem_batch_new = tf.concat([nonMem, [mix_batch[index]]], axis=0)
                    mix_batch_new = tf.concat([mix_batch[:index], mix_batch[index+1:]], axis=0)
                    m_pred_without = np.r_[m_pred_batch[:index], m_pred_batch[index+1:]]
                    # Keep only samples still labelled member in the remaining set.
                    mix_batch_new = mix_batch_new[m_pred_without.astype(bool, copy=True)]
                    dis_new = mmd_loss(nonMem_batch_new, mix_batch_new, weight=1)
                    if dis_new > dis_ori:
                        # The move widens the member/non-member gap: accept it
                        # and schedule another pass.
                        nonMemInMix = True
                        m_pred_epoch[index] = 0
            # Commit this pass's moves before the next pass.
            m_pred_batch = m_pred_epoch.copy()
        mix_shuffled.append(mix_batch)
        m_pred.append(m_pred_batch)
        m_true.append(m_true_batch)
    # Concatenate per-batch results back into flat arrays (in shuffled order).
    return np.concatenate(m_true, axis=0), np.concatenate(m_pred, axis=0), \
        np.concatenate(mix_shuffled, axis=0), nonMem
# Run the attack over the concatenation of the target's train and test splits;
# m_true (from the loader) marks which of these were actually used in training.
m_true, m_pred, mix, nonMem = diff_Mem_attack(np.r_[x_train_tar, x_test_tar],
                                              np.r_[y_train_tar, y_test_tar],
                                              m_true, Target_Model)
# Report membership-inference attack metrics (project helper from BlindMIUtil).
evaluate_attack(m_true, m_pred)