"""
Testing the performance deep learning models on images/features to predict whether an image is fake/synthetic or real/natural.
"""
# Importing Libraries
import numpy as np
import torch
import os, sys, warnings
warnings.filterwarnings("ignore")
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from yaml import safe_load
import prior_methods.prior_functions.prior_networks as prior_networks
import prior_methods.prior_functions.prior_module as prior_module
import functions.utils as utils
import defaults
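# Note: prior_networks, prior_module, utils, and defaults are repo-local modules. This script only relies on
# prior_networks.get_model(), prior_module.run(), utils.parser_args(), the utils.get_*_options() helpers,
# utils.write_results_csv(), and defaults.main_prior_checkpoints_dir.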
# Main entry point
if __name__ == '__main__':
    # -----------------------------------------------------------------
    # Flushing Output
    import functools
    print = functools.partial(print, flush=True)
    # Saving stdout to a log file (make sure the output directory exists first)
    os.makedirs("prior_results", exist_ok=True)
    sys.stdout = open('prior_results/{}.log'.format(os.path.basename(__file__)[:-3]), 'w')
    # -----------------------------------------------------------------
    # Parsing Arguments
    args = utils.parser_args()
    # (train, test) dataset pairs to evaluate; cross-dataset evaluation when the two differ
    train_test_dataset_types_list = [
        ("DRCT", "UnivFD"), ("DRCT", "GenImage"),
        ("GenImage", "DRCT"), ("GenImage", "UnivFD"),
        ("GenImage", "GenImage"), ("DRCT", "DRCT"),
    ]
    # For each pair of train and test datasets
    for train_dataset_type, test_dataset_type in train_test_dataset_types_list:
        # Save-folder prefix
        if train_dataset_type == test_dataset_type:
            prefix = ""
        else:
            prefix = "Cross_"
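        # Each entry below is a (preprocess_settings, suffix) pair: the dict configures the test-time
        # distortion (Gaussian-blur sigma range, JPEG quality-factor range, input image dimensions, optional
        # resize, and the probability of applying the distortion, where -1 presumably disables it), and the
        # suffix names the CSV file that the corresponding results are written to.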
        # Pre-Process Settings based on dataset
        if train_dataset_type == test_dataset_type:
            if test_dataset_type == "UnivFD":
                preprocess_settings_list = [
                    # Default
                    ({"probability": -1, "gaussian_blur_range": None, "jpeg_compression_qfs": None, "input_image_dimensions": (224,224), "resize": None}, "default"),
                ]
            else:
                preprocess_settings_list = [
                    # Default
                    ({"probability": -1, "gaussian_blur_range": None, "jpeg_compression_qfs": None, "input_image_dimensions": (224,224), "resize": None}, "default"),
                    # Gaussian-Blur
                    ({"probability": 1, "gaussian_blur_range": [1,1], "jpeg_compression_qfs": None, "input_image_dimensions": (224,224), "resize": None}, "sigma=1"),
                    ({"probability": 1, "gaussian_blur_range": [2,2], "jpeg_compression_qfs": None, "input_image_dimensions": (224,224), "resize": None}, "sigma=2"),
                    ({"probability": 1, "gaussian_blur_range": [3,3], "jpeg_compression_qfs": None, "input_image_dimensions": (224,224), "resize": None}, "sigma=3"),
                    ({"probability": 1, "gaussian_blur_range": [4,4], "jpeg_compression_qfs": None, "input_image_dimensions": (224,224), "resize": None}, "sigma=4"),
                    ({"probability": 1, "gaussian_blur_range": [5,5], "jpeg_compression_qfs": None, "input_image_dimensions": (224,224), "resize": None}, "sigma=5"),
                    # JPEG-Compression
                    ({"probability": 1, "gaussian_blur_range": None, "jpeg_compression_qfs": [90,90], "input_image_dimensions": (224,224), "resize": None}, "jpegQF=90"),
                    ({"probability": 1, "gaussian_blur_range": None, "jpeg_compression_qfs": [80,80], "input_image_dimensions": (224,224), "resize": None}, "jpegQF=80"),
                    ({"probability": 1, "gaussian_blur_range": None, "jpeg_compression_qfs": [70,70], "input_image_dimensions": (224,224), "resize": None}, "jpegQF=70"),
                    ({"probability": 1, "gaussian_blur_range": None, "jpeg_compression_qfs": [60,60], "input_image_dimensions": (224,224), "resize": None}, "jpegQF=60"),
                    ({"probability": 1, "gaussian_blur_range": None, "jpeg_compression_qfs": [50,50], "input_image_dimensions": (224,224), "resize": None}, "jpegQF=50"),
                    ({"probability": 1, "gaussian_blur_range": None, "jpeg_compression_qfs": [40,40], "input_image_dimensions": (224,224), "resize": None}, "jpegQF=40"),
                    ({"probability": 1, "gaussian_blur_range": None, "jpeg_compression_qfs": [30,30], "input_image_dimensions": (224,224), "resize": None}, "jpegQF=30"),
                ]
        else:
            preprocess_settings_list = [
                # Default
                ({"probability": -1, "gaussian_blur_range": None, "jpeg_compression_qfs": None, "input_image_dimensions": (224,224), "resize": None}, "default"),
            ]
        # For each preprocess_settings
        for preprocess_settings, suffix in preprocess_settings_list:
            # Inference-Restriction-1: Config Files
            """
            - Inference only on a limited set of feature extractors for the various kinds of image distortions
            """
            if train_dataset_type == "DRCT":
                # Config Filenames
                config_filenames = [
                    "drct-convnext-b",
                    "drct-clip-vit-l-14",
                ]
            elif train_dataset_type == "GenImage":
                config_filenames = [
                    "clip-resnet50",
                    "clip-vit-l-14",
                    "drct-convnext-b",
                    "drct-clip-vit-l-14",
                ]
            else:
                config_filenames = [
                    "clip-resnet50",
                    "clip-vit-l-14",
                ]
            # Iterating over each config_filename
            for config_filename in config_filenames:
                # Loading Config file
                dir_path = os.path.dirname(os.path.realpath(__file__))
                args.config = os.path.join(dir_path, "prior_configs/{}.yaml".format(config_filename))
                with open(args.config, 'r') as f:
                    config: dict = safe_load(f)
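                # The YAML config is expected to provide at least the sections used below: "checkpoints"
                # (resume/checkpoint dirnames and filenames), "dataset" (dataset_type, model_name,
                # f_model_name), "train_settings", "train_loss_fn", and "val_loss_fn".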
                # Inference-Restriction-2: Variants of Training: Removed
                """
                - Inference only on a limited set of feature extractors for the basic list of image distortions
                """
                if config_filename in ("clip-vit-l-14", "clip-resnet50"):
                    checkpoint_directories = ["p=0.5_standard/CrossEntropy"]
                else:
                    checkpoint_directories = ["extensive/MarginContrastiveLoss_CrossEntropy"]
                # For each training variant
                for ckpt_dir in checkpoint_directories:
                    # Changes: (resume_ckpt_path, checkpoint_dirname, checkpoint_filename, dataset_type)
                    config["checkpoints"]["resume_dirname"] = os.path.join(train_dataset_type, ckpt_dir)
                    config["checkpoints"]["resume_filename"] = "best_model.pth"
                    config["checkpoints"]["checkpoint_dirname"] = ckpt_dir
                    config["checkpoints"]["checkpoint_filename"] = "best_model.pth"
                    config["dataset"]["dataset_type"] = test_dataset_type
                    # Threshold for calculating metrics
                    if test_dataset_type == "UnivFD":
                        best_threshold = None
                    else:
                        best_threshold = 0.5
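                    # For UnivFD, best_threshold is left as None so that prior_module.run() presumably
                    # calibrates and returns a threshold itself; for the other datasets a fixed probability
                    # threshold of 0.5 is used.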
                    # Setting model_name and preprocess_type for Pre-processing
                    preprocess_settings["model_name"] = config["dataset"]["model_name"]
                    preprocess_settings["selected_transforms_name"] = "test"
                    # Dataset-Type
                    dataset_type = config["dataset"]["dataset_type"]
                    # Model names
                    model_name = config["dataset"]["model_name"]
                    f_model_name = config["dataset"]["f_model_name"]
                    # Building feature extractor and classifier
                    feature_extractor, classifier = prior_networks.get_model(model_name=config["dataset"]["model_name"], device="cuda")
                    # Loading Resume-Checkpoints
                    config["checkpoints"]["resume_ckpt_path"] = os.path.join(defaults.main_prior_checkpoints_dir, config["checkpoints"]["resume_dirname"], f_model_name, config["checkpoints"]["resume_filename"])
                    # Loading Weights of Classifier
                    if "drct" in model_name:
                        # Load the entire model, i.e. the CLIP/ConvNeXt weights as well as the additional layer weights.
                        initial_weights = classifier.state_dict()
                        trained_weights = torch.load(config["checkpoints"]["resume_ckpt_path"])
                        for (k1, v1), (k2, v2) in zip(initial_weights.items(), trained_weights.items()):
                            assert v1.shape == v2.shape, "Weights Mismatch"
                            initial_weights[k1] = trained_weights[k2]
                        classifier.load_state_dict(initial_weights, strict=True)
                    elif model_name == "clip-vit-l-14" and train_dataset_type == "UnivFD":
                        # In this case, only the weights of the final classifier layer are available.
                        initial_weights = classifier.state_dict()
                        trained_weights = torch.load(config["checkpoints"]["resume_ckpt_path"])
                        for k in trained_weights.keys():
                            if k == "weight":
                                assert trained_weights[k].shape == initial_weights["classifier.weight"].shape, "Weights Mismatch"
                                initial_weights["classifier.weight"] = trained_weights[k]
                            elif k == "bias":
                                assert trained_weights[k].shape == initial_weights["classifier.bias"].shape, "Weights Mismatch"
                                initial_weights["classifier.bias"] = trained_weights[k]
                            else:
                                # Ignore any other keys in the checkpoint
                                pass
                        classifier.load_state_dict(initial_weights, strict=True)
                    else:
                        # Cases that we trained ourselves for comparison
                        initial_weights = classifier.state_dict()
                        trained_weights = torch.load(config["checkpoints"]["resume_ckpt_path"])
                        for k in trained_weights["model"].keys():
                            if k == "fc.weight":
                                assert trained_weights["model"][k].shape == initial_weights["classifier.weight"].shape, "Weights Mismatch"
                                initial_weights["classifier.weight"] = trained_weights["model"][k]
                            elif k == "fc.bias":
                                assert trained_weights["model"][k].shape == initial_weights["classifier.bias"].shape, "Weights Mismatch"
                                initial_weights["classifier.bias"] = trained_weights["model"][k]
                            else:
                                # Ignore any other keys in the checkpoint
                                pass
                        classifier.load_state_dict(initial_weights, strict=True)
                    # Log
                    print(
                        "\n",
                        "Classifier:", "\n",
                        classifier, "\n",
                        "\n"
                    )
                    # Assertions
                    for key in ["dataset_type", "model_name"]:
                        assert key in config["dataset"], "{} not provided".format(key)
                    # Image-Sources and Classes
                    if config["dataset"]["dataset_type"] == "GenImage":
                        # GenImage Dataset
                        train_image_sources, test_image_sources = utils.get_GenImage_options()
                    elif config["dataset"]["dataset_type"] == "UnivFD":
                        # UnivFD Dataset
                        train_image_sources, test_image_sources = utils.get_UnivFD_options()
                    elif config["dataset"]["dataset_type"] == "DRCT":
                        # DRCT Dataset
                        train_image_sources, test_image_sources = utils.get_DRCT_options()
                    else:
                        raise ValueError("Invalid dataset_type: {}".format(config["dataset"]["dataset_type"]))
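                    # The utils.get_*_options() helpers presumably return the lists of image-source names
                    # (generators/real sources) that make up the train and test splits of each dataset.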
                    # Log
                    print(
                        "\n",
                        "Test-Settings:", "\n",
                        " "*2, "dataset_type:", dataset_type, "\n",
                        " "*2, "model_name:", model_name, "\n",
                        " "*2, "f_model_name:", f_model_name, "\n",
                        " "*2, "train_image_sources:", train_image_sources, "\n",
                        " "*2, "test_image_sources:", test_image_sources, "\n",
                        " "*2, "resume_dirname:", config["checkpoints"]["resume_dirname"], "\n",
                        " "*2, "best_threshold:", best_threshold, "\n",
                        "\n"
                    )
                    # Testing
                    config["train_settings"]["train"] = False
                    config["train_loss_fn"]["name"] = "CrossEntropy"
                    config["val_loss_fn"]["name"] = "CrossEntropy"
                    test_set_metrics, best_threshold = prior_module.run(
                        feature_extractor=feature_extractor,
                        classifier=classifier,
                        config=config,
                        train_image_sources=train_image_sources,
                        test_image_sources=test_image_sources,
                        preprocess_settings=preprocess_settings,
                        best_threshold=best_threshold,
                        verbose=False
                    )
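                    # run() returns the test-set metrics and the (possibly calibrated) decision threshold;
                    # results for this setting are written to
                    # prior_results/<prefix><ckpt_dir>_<train_dataset_type>_<test_dataset_type>/<suffix>.csv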
                    # Saving Results
                    utils.write_results_csv(
                        test_set_metrics=test_set_metrics,
                        test_image_sources=test_image_sources,
                        f_model_name=f_model_name,
                        save_path=os.path.join("prior_results", "{}{}_{}_{}".format(prefix, ckpt_dir, train_dataset_type, test_dataset_type), "{}.csv".format(suffix)),
                    )
                    print("\n"*2)