attack_model

  1"""
  2.. include:: ../docs/attack_model.md
  3"""
  4
  5from os import environ
  6
  7# Tensorflow C++ backend logging verbosity
  8environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # NOQA
  9
 10from os.path import dirname, join
 11
 12from typing import Dict, List, Tuple
 13from tensorflow import keras
 14from tensorflow.data import Dataset  # pyright: ignore
 15from tensorflow.python.framework import random_seed
 16from tensorflow.keras.activations import relu  # pyright: ignore
 17from tensorflow.keras.initializers import glorot_uniform  # pyright: ignore
 18from tensorflow.keras.layers import Dense, InputLayer, Softmax  # pyright: ignore
 19from tensorflow.keras import Sequential  # pyright: ignore
 20import numpy as np
 21
 22import utils
 23import datasets as ds
 24
# Module-wide RNG seed; change it via set_seed(), which also seeds TensorFlow.
global_seed: int = 1234
 26
 27
 28def set_seed(new_seed: int):
 29    """
 30    Set the global seed that will be used for all functions that include
 31    randomness.
 32    """
 33    global global_seed
 34    global_seed = new_seed
 35    random_seed.set_seed(global_seed)
 36
 37
 38class KaggleAttackModel(Sequential):
 39    """
 40    Architecture:
 41        Fully connected NN,
 42        1 hiddenlayer, size 64,
 43        ReLU activation
 44        Softmax LAyer
 45
 46    One model for each class
 47    """
 48
 49    def __init__(self, numClasses: int) -> None:
 50        super().__init__()
 51        activation = relu
 52        initializer = glorot_uniform
 53        self.add(InputLayer(input_shape=(numClasses)))
 54        self.add(Dense(64, activation=activation,
 55                 kernel_initializer=initializer))
 56        self.add(Dense(2, kernel_initializer=initializer))
 57        self.add(Softmax())
 58
 59
 60def load_model(name: str, verbose=True) -> Sequential:
 61    """
 62    Load model from disk.
 63
 64    The file name will be constructed from the `name` argument.
 65    """
 66    if verbose:
 67        print(f"Loading model {name} from disk.")
 68    filePath: str = join(dirname(__file__), "../models/attack", name)
 69    return keras.models.load_model(filePath)
 70
 71
 72def save_model(name: str, model: Sequential) -> None:
 73    """
 74    Save model to disk.
 75
 76    The file name will be constructed from the `name` argument.
 77    """
 78    filePath: str = join(dirname(__file__), "../models/attack", name)
 79    model.save(filePath)
 80
 81
 82def train_model(model: Sequential, modelName: str, trainData: Dataset,
 83                testData: Dataset, hyperpar: Dict):
 84    epochs: int = int(hyperpar["epochs"])
 85    learningRate: float = float(hyperpar["learningRate"])
 86    batchSize: int = int(hyperpar["batchSize"])
 87
 88    optimizer = keras.optimizers.Adam(name="Adam", learning_rate=learningRate)
 89    loss = keras.losses.CategoricalCrossentropy()
 90    metrics = ["accuracy"]
 91
 92    model.compile(optimizer, loss, metrics)
 93    # TODO: drop_remainder: make sure dataset is still 50/50 in/out
 94    trainData = trainData.batch(batchSize, drop_remainder=True)
 95    testData = testData.batch(batchSize, drop_remainder=True)
 96    log_dir = "logs/attack/" + modelName
 97    cb = keras.callbacks.TensorBoard(histogram_freq=1, log_dir=log_dir)
 98    return model.fit(trainData, epochs=epochs, callbacks=[cb], validation_data=testData)
 99
100
def evaluate_models(models: List[Sequential], datasets: List[ds.Dataset]):
    """
    Evaluate each per-class attack model on its test and train split and
    dump the accuracies to two CSV files prefixed with a hash of the
    configuration.

    `datasets[i]` is expected to be a (testData, trainData) pair for
    `models[i]`.

    NOTE(review): this reads the module-global `config`, which is only
    bound when this file runs as a script — calling it from another
    module without a `config` global raises NameError.
    """
    # TODO: Evaluate on randomly reshuffled records from test/train dataset
    assert len(models) == len(datasets)
    test_accuracies = []
    train_accuracies = []
    for model, (testData, trainData) in zip(models, datasets):
        # model.evaluate returns [loss, accuracy]; keep only the accuracy.
        test_accuracies.append(evaluate_model(model, testData)[1])
        train_accuracies.append(evaluate_model(model, trainData)[1])

    configHash = utils.hash(str(config))  # renamed: don't shadow builtin `hash`

    _write_accuracies(f"{configHash}_attackModelTrainAccuracy.csv",
                      "Training", train_accuracies)
    _write_accuracies(f"{configHash}_attackModelTestAccuracy.csv",
                      "Testing", test_accuracies)


def _write_accuracies(fileName: str, label: str, accuracies: List[float]) -> None:
    """Write a header containing the average accuracy, then one accuracy per line."""
    with open(fileName, 'w') as file:
        file.write(f"Attack Model {label} Accuracies (Overall:{np.average(accuracies)})\n")
        for accuracy in accuracies:
            file.write(f"{accuracy}\n")
124
125
def evaluate_model(model: Sequential, dataset: Dataset):
    """
    Run Keras `evaluate` on `dataset` and return its result
    (loss followed by metric values).
    """
    # TODO: batchSize is hardcoded
    batched = dataset.batch(10, drop_remainder=False)
    return model.evaluate(batched)
131
132
def get_model_name(config: Dict, i: int) -> str:
    """
    Construct the file/log name for attack model number `i` (0-based)
    from the dataset names, hyperparameters, and class count in `config`.
    """
    hyperPar = config["attackModel"]["hyperparameters"]
    classes = config["targetModel"]["classes"]
    parts = [
        config["targetDataset"]["name"],
        config["shadowDataset"]["method"],
        f'lr_{hyperPar["learningRate"]}',
        f'bs_{hyperPar["batchSize"]}',
        f'epochs_{hyperPar["epochs"]}',
        f'{i + 1}_of_{classes}',
    ]
    return "_".join(parts)
143
144
def get_attack_models(config: Dict, attackDatasets: List[Tuple[ds.Dataset, ds.Dataset]]) -> List[KaggleAttackModel]:
    """
    Load one attack model per target class from disk, training and
    saving any model that cannot be loaded.

    `attackDatasets[i]` is the (testData, trainData) pair for class `i`.
    Returns the list of models, indexed by class.
    """
    verbose = config["verbose"]
    modelConfig = config["attackModel"]["hyperparameters"]
    numClasses = config["targetModel"]["classes"]
    attackModels = []

    print("Loading attack models from disk.")
    for i in range(numClasses):
        modelName = get_model_name(config, i)
        try:
            model: KaggleAttackModel = load_model(modelName, verbose=verbose)
        except Exception:
            # Catch Exception rather than BaseException so that
            # KeyboardInterrupt/SystemExit still abort the run instead of
            # silently triggering a retrain.
            print(f"Couldn't load attack model {i+1}, retraining.")
            testData, trainData = attackDatasets[i]
            trainData = ds.shuffle(trainData)

            model = KaggleAttackModel(numClasses)

            train_model(model, modelName, trainData, testData, modelConfig)

            print(f"Saving attack model {i+1} to disk.")
            model._name = modelName
            save_model(modelName, model)
            evaluate_model(model, testData)

        attackModels.append(model)

    return attackModels
173
174
if __name__ == "__main__":
    import argparse
    import configuration as con
    import attack_data as ad

    # CLI entry point: train and evaluate all per-class attack models
    # for the configuration given on the command line.
    parser = argparse.ArgumentParser(description='Train the attack models.')
    parser.add_argument('--config', help='Relative path to config file.',)
    config = con.from_cli_options(vars(parser.parse_args()))
    # Seed TF before any dataset shuffling or weight initialization.
    set_seed(config["seed"])

    attackDatasets = ad.load_attack_data(config)
    attackModels = get_attack_models(config, attackDatasets)
    # NOTE: evaluate_models reads the module-global `config` bound above.
    evaluate_models(attackModels, attackDatasets)
def set_seed(new_seed: int):
29def set_seed(new_seed: int):
30    """
31    Set the global seed that will be used for all functions that include
32    randomness.
33    """
34    global global_seed
35    global_seed = new_seed
36    random_seed.set_seed(global_seed)

Set the global seed that will be used for all functions that include randomness.

class KaggleAttackModel(keras.engine.sequential.Sequential):
39class KaggleAttackModel(Sequential):
40    """
41    Architecture:
42        Fully connected NN,
43    1 hidden layer, size 64,
44        ReLU activation
45        Softmax layer
46
47    One model for each class
48    """
49
50    def __init__(self, numClasses: int) -> None:
51        super().__init__()
52        activation = relu
53        initializer = glorot_uniform
54        self.add(InputLayer(input_shape=(numClasses)))
55        self.add(Dense(64, activation=activation,
56                 kernel_initializer=initializer))
57        self.add(Dense(2, kernel_initializer=initializer))
58        self.add(Softmax())

Architecture: Fully connected NN, 1 hidden layer, size 64, ReLU activation, Softmax layer

One model for each class

KaggleAttackModel(numClasses: int)
50    def __init__(self, numClasses: int) -> None:
51        super().__init__()
52        activation = relu
53        initializer = glorot_uniform
54        self.add(InputLayer(input_shape=(numClasses)))
55        self.add(Dense(64, activation=activation,
56                 kernel_initializer=initializer))
57        self.add(Dense(2, kernel_initializer=initializer))
58        self.add(Softmax())

Creates a Sequential model instance.

Args: layers: Optional list of layers to add to the model. name: Optional name for the model.

Inherited Members
keras.engine.sequential.Sequential
supports_masking
add
pop
build
call
compute_output_shape
compute_mask
get_config
from_config
input_spec
keras.engine.functional.Functional
input
input_shape
output
output_shape
keras.engine.training.Model
compile
metrics
metrics_names
distribute_strategy
run_eagerly
train_step
compute_loss
compute_metrics
make_train_function
fit
test_step
make_test_function
evaluate
predict_step
make_predict_function
predict
reset_metrics
train_on_batch
test_on_batch
predict_on_batch
fit_generator
evaluate_generator
predict_generator
trainable_weights
non_trainable_weights
get_weights
save
save_weights
load_weights
to_json
to_yaml
reset_states
state_updates
weights
summary
get_layer
save_spec
keras.engine.base_layer.Layer
add_weight
compute_output_signature
dtype
name
dynamic
trainable
activity_regularizer
losses
add_loss
add_metric
add_update
set_weights
finalize_state
get_input_mask_at
get_output_mask_at
input_mask
output_mask
get_input_shape_at
get_output_shape_at
get_input_at
get_output_at
count_params
dtype_policy
compute_dtype
variable_dtype
inbound_nodes
outbound_nodes
variables
trainable_variables
non_trainable_variables
add_variable
tensorflow.python.module.module.Module
name_scope
submodules
with_name_scope
def load_model(name: str, verbose=True) -> keras.engine.sequential.Sequential:
61def load_model(name: str, verbose=True) -> Sequential:
62    """
63    Load model from disk.
64
65    The file name will be constructed from the `name` argument.
66    """
67    if verbose:
68        print(f"Loading model {name} from disk.")
69    filePath: str = join(dirname(__file__), "../models/attack", name)
70    return keras.models.load_model(filePath)

Load model from disk.

The file name will be constructed from the name argument.

def save_model(name: str, model: keras.engine.sequential.Sequential) -> None:
73def save_model(name: str, model: Sequential) -> None:
74    """
75    Save model to disk.
76
77    The file name will be constructed from the `name` argument.
78    """
79    filePath: str = join(dirname(__file__), "../models/attack", name)
80    model.save(filePath)

Save model to disk.

The file name will be constructed from the name argument.

def train_model( model: keras.engine.sequential.Sequential, modelName: str, trainData: tensorflow.python.data.ops.dataset_ops.DatasetV2, testData: tensorflow.python.data.ops.dataset_ops.DatasetV2, hyperpar: Dict):
83def train_model(model: Sequential, modelName: str, trainData: Dataset,
84                testData: Dataset, hyperpar: Dict):
85    epochs: int = int(hyperpar["epochs"])
86    learningRate: float = float(hyperpar["learningRate"])
87    batchSize: int = int(hyperpar["batchSize"])
88
89    optimizer = keras.optimizers.Adam(name="Adam", learning_rate=learningRate)
90    loss = keras.losses.CategoricalCrossentropy()
91    metrics = ["accuracy"]
92
93    model.compile(optimizer, loss, metrics)
94    # TODO: drop_remainder: make sure dataset is still 50/50 in/out
95    trainData = trainData.batch(batchSize, drop_remainder=True)
96    testData = testData.batch(batchSize, drop_remainder=True)
97    log_dir = "logs/attack/" + modelName
98    cb = keras.callbacks.TensorBoard(histogram_freq=1, log_dir=log_dir)
99    return model.fit(trainData, epochs=epochs, callbacks=[cb], validation_data=testData)
def evaluate_models( models: List[keras.engine.sequential.Sequential], datasets: List[tensorflow.python.data.ops.dataset_ops.DatasetV2]):
102def evaluate_models(models: List[Sequential], datasets: List[ds.Dataset]):
103    # TODO: Evaluate on randomly reshuffled records from test/train dataset
104    assert len(models) == len(datasets)
105    test_accuracies = []
106    train_accuracies = []
107    for i in range(len(models)):
108        testData = datasets[i][0]
109        trainData = datasets[i][1]
110        test_accuracy = evaluate_model(models[i], testData)[1]
111        train_accuracy = evaluate_model(models[i], trainData)[1]
112        test_accuracies.append(test_accuracy)
113        train_accuracies.append(train_accuracy)
114
115    hash = utils.hash(str(config))
116
117    with open(f"{hash}_attackModelTrainAccuracy.csv",'w') as file:
118        file.write(f"Attack Model Training Accuracies (Overall:{np.average(train_accuracies)})\n")
119        for train_acc in train_accuracies:
120            file.write(f"{train_acc}\n")
121    with open(f"{hash}_attackModelTestAccuracy.csv",'w') as file:
122        file.write(f"Attack Model Testing Accuracies (Overall:{np.average(test_accuracies)})\n")
123        for test_acc in test_accuracies:
124            file.write(f"{test_acc}\n")
def evaluate_model( model: keras.engine.sequential.Sequential, dataset: tensorflow.python.data.ops.dataset_ops.DatasetV2):
127def evaluate_model(model: Sequential, dataset: Dataset):
128    # TODO: batchSize is hardcoded
129    batchSize = 10
130    dataset = dataset.batch(batchSize, drop_remainder=False)
131    return model.evaluate(dataset)
def get_model_name(config: Dict, i: int) -> str:
134def get_model_name(config: Dict, i: int) -> str:
135    modelConfig = config["attackModel"]["hyperparameters"]
136    numClasses = config["targetModel"]["classes"]
137    return \
138        f'{config["targetDataset"]["name"]}_' + \
139        f'{config["shadowDataset"]["method"]}_' + \
140        f'lr_{modelConfig["learningRate"]}_' + \
141        f'bs_{modelConfig["batchSize"]}_' + \
142        f'epochs_{modelConfig["epochs"]}_' + \
143        f'{i+1}_of_{numClasses}'
def get_attack_models( config: Dict, attackDatasets: List[Tuple[tensorflow.python.data.ops.dataset_ops.DatasetV2, tensorflow.python.data.ops.dataset_ops.DatasetV2]]) -> List[attack_model.KaggleAttackModel]:
146def get_attack_models(config: Dict, attackDatasets: List[Tuple[ds.Dataset, ds.Dataset]]) -> List[KaggleAttackModel]:
147    verbose = config["verbose"]
148    modelConfig = config["attackModel"]["hyperparameters"]
149    numClasses = config["targetModel"]["classes"]
150    attackModels = []
151
152    print(f"Loading attack models from disk.")
153    for i in range(numClasses):
154        modelName = get_model_name(config, i)
155        try:
156            model: KaggleAttackModel = load_model(modelName, verbose=verbose)
157        except BaseException:
158            print(f"Couldn't load attack model {i+1}, retraining.")
159            testData, trainData = attackDatasets[i]
160            trainData = ds.shuffle(trainData)
161
162            model = KaggleAttackModel(config["targetModel"]["classes"])
163
164            train_model(model, modelName, trainData, testData, modelConfig)
165
166            print(f"Saving attack model {i+1} to disk.")
167            model._name = modelName
168            save_model(modelName, model)
169            evaluate_model(model, testData)
170
171        attackModels.append(model)
172
173    return attackModels