-
Notifications
You must be signed in to change notification settings - Fork 11
/
Copy pathanalysis_text_length.sh
34 lines (31 loc) · 1.26 KB
/
analysis_text_length.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
#!/bin/bash
# Sweep membership-inference-attack (MIA) experiments over several block sizes
# for Llama-2 base and chat models on the WikiMIA-24 dataset.
# Shared fine-tuning hyperparameters for every run below.
BLOCK_SIZE=128
# NOTE: this initial BLOCK_SIZE is dead — the for-loops below overwrite it
# with each value of the sweep (32 64 128 256).
EPOCHS=50
BATCH_SIZE=8
LEARNING_RATE=5e-4
GRADIENT_ACCUMULATION_STEPS=1
DATASET_NAME=wjfu99/WikiMIA-24
# --- Sweep 1: unaligned base model ---
MODEL=meta-llama/Llama-2-7b-hf
for BLOCK_SIZE in 32 64 128 256
do
  # Larger blocks yield fewer samples from the dataset, so cap the
  # training-set size lower for the 256-token setting.
  # BUG FIX: the assignments previously had spaces around '=' (e.g.
  # "MAX_TRAIN_SAMPLES =148"), which Bash parses as running a command named
  # MAX_TRAIN_SAMPLES — the variable was never set and --max_train_samples
  # expanded to an empty string.
  if [ "${BLOCK_SIZE}" -eq 256 ]; then
    MAX_TRAIN_SAMPLES=148
  else
    MAX_TRAIN_SAMPLES=160
  fi
  # Fine-tune + run the hybrid MIA, then the baseline attacks, for this block size.
  accelerate launch mia_hybrid.py -m ${MODEL} --unaligned_model -d ${DATASET_NAME} --max_train_samples ${MAX_TRAIN_SAMPLES} \
  --block_size ${BLOCK_SIZE} --epochs ${EPOCHS} --batch_size ${BATCH_SIZE} --learning_rate ${LEARNING_RATE} --gradient_accumulation_steps ${GRADIENT_ACCUMULATION_STEPS}
  python run_baselines.py --model ${MODEL} --dataset ${DATASET_NAME} --block_size ${BLOCK_SIZE}
done
# --- Sweep 2: aligned chat model (no --unaligned_model flag) ---
MODEL=meta-llama/Llama-2-7b-chat-hf
for BLOCK_SIZE in 32 64 128 256
do
  # Larger blocks yield fewer samples from the dataset, so cap the
  # training-set size lower for the 256-token setting.
  # BUG FIX: the assignments previously had spaces around '=' (e.g.
  # "MAX_TRAIN_SAMPLES =148"), which Bash parses as running a command named
  # MAX_TRAIN_SAMPLES — the variable was never set and --max_train_samples
  # expanded to an empty string.
  if [ "${BLOCK_SIZE}" -eq 256 ]; then
    MAX_TRAIN_SAMPLES=148
  else
    MAX_TRAIN_SAMPLES=160
  fi
  # Fine-tune + run the hybrid MIA, then the baseline attacks, for this block size.
  accelerate launch mia_hybrid.py -m ${MODEL} -d ${DATASET_NAME} --max_train_samples ${MAX_TRAIN_SAMPLES} \
  --block_size ${BLOCK_SIZE} --epochs ${EPOCHS} --batch_size ${BATCH_SIZE} --learning_rate ${LEARNING_RATE} --gradient_accumulation_steps ${GRADIENT_ACCUMULATION_STEPS}
  python run_baselines.py --model ${MODEL} --dataset ${DATASET_NAME} --block_size ${BLOCK_SIZE}
done