"""
Testing the performance of deep learning models on images/features to predict whether an image is fake/synthetic or real/natural.
"""
# Importing Libraries
import numpy as np

import torch
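# Allow lower-precision matmul kernels (e.g. TF32) for speed on GPUs that support them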
torch.set_float32_matmul_precision('medium')
import os, sys, warnings
warnings.filterwarnings("ignore")
from yaml import safe_load
from functions.loss_optimizers_metrics import *
from functions.run_on_images_fn import run_on_images
import functions.utils as utils
import functions.networks as networks
import defaults

# Get all images from new_images_to_test folder
import glob
test_images_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "new_images_to_test")
image_extensions = ['*.jpg', '*.jpeg', '*.png', '*.JPG', '*.JPEG', '*.PNG']
test_real_images_paths = []
for ext in image_extensions:
	test_real_images_paths.extend([os.path.abspath(p) for p in glob.glob(os.path.join(test_images_dir, ext))])
# De-duplicate and sort: on case-insensitive filesystems the upper- and lower-case patterns match the same files twice
test_real_images_paths = sorted(set(test_real_images_paths))

# No known-fake list here: every image to score is passed through the "real" path list
test_fake_images_paths = []

if not test_real_images_paths:
	print(f"Error: No images found in {test_images_dir}/")
	sys.exit(1)

print(f"Found {len(test_real_images_paths)} image(s) to test:")
for img in test_real_images_paths:
	print(f"  - {os.path.basename(img)}")
print()

# Script entry point
if __name__ == '__main__':
	# -----------------------------------------------------------------
	# Flush every print immediately (matters once stdout is redirected to the log file below)
	import functools
	print = functools.partial(print, flush=True)

	# Redirecting stdout to a log file (create results/ first, otherwise open() fails)
	os.makedirs('results', exist_ok=True)
	sys.stdout = open('results/{}.log'.format(os.path.basename(__file__)[:-3]), 'w')

	# -----------------------------------------------------------------

	# Parsing Arguments
	args = utils.parser_args()

	# Train/test dataset combinations to evaluate
	train_test_dataset_types_list = [("GenImage", "GenImage")]
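	# e.g. appending ("GenImage", "UnivFD") would trigger a cross-dataset run (see the 'Cross_' prefix below)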


	# For each train and test datasets
	for train_dataset_type, test_dataset_type in train_test_dataset_types_list:
		# Save folder prefix
		if train_dataset_type == test_dataset_type:
			prefix = ""
		else:
			prefix = 'Cross_'


		# Pre-Process Settings based on dataset
		preprocess_settings_list = [
			# Default
				({"probability": -1, "gaussian_blur_range": None, "jpeg_compression_qfs": None, "input_image_dimensions": (224,224), "resize": None}, "default"),
		]

		
		# For each preprocess_settings
		for preprocess_settings, suffix in preprocess_settings_list:
			# Inference-Restriction-1: Config Files
			"""
			- Run inference on a limited set of feature extractors across various kinds of image distortions
			"""
			# Config Filenames
			config_filenames = [
				# "hyperiqa",
				# "tres",
				"contrique",
				# "reiqa",
				# "arniqa"
			]
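			# Uncomment entries above to also evaluate those feature extractors (each needs a matching configs/<name>.yaml)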

			# Iterating for each config_filename
			for config_filename in config_filenames:
				# Loading Config file
				dir_path = os.path.dirname(os.path.realpath(__file__))
				args.config = os.path.join(dir_path, "configs/{}.yaml".format(config_filename))
				with open(args.config, 'r') as f:
					config: dict = safe_load(f)


				# Inference-Restriction-2: Variants of Training: Removed
				"""
				- Run inference on a limited set of training variants (a basic list of image distortions)
				"""
				checkpoint_directories = [
					"extensive/MarginContrastiveLoss_CrossEntropy"
				]


				# For each training variant
				for ckpt_dir in checkpoint_directories:
					# Changes: (resume_ckpt_path, checkpoint_dirname, checkpoint_filename, dataset_type)
					config["checkpoints"]["resume_dirname"] = os.path.join(train_dataset_type, ckpt_dir)
					config["checkpoints"]["resume_filename"] = "best_model.ckpt"
					config["checkpoints"]["checkpoint_dirname"] = ckpt_dir
					config["checkpoints"]["checkpoint_filename"] = "best_model.ckpt"
					config["dataset"]["dataset_type"] = test_dataset_type


					# Threshold for calculating metrics
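					# None lets the threshold be determined during evaluation (run_on_images returns the one it used)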
					if test_dataset_type == 'UnivFD':
						best_threshold = None
					else:
						best_threshold = 0.5


					# Setting model_name and preprocess_type for Pre-processing
					preprocess_settings["model_name"] = config["dataset"]["model_name"]
					preprocess_settings["selected_transforms_name"] = "test"

					
					# Dataset-Type
					dataset_type = config["dataset"]["dataset_type"]

					# Model
					model_name = config["dataset"]["model_name"]
					f_model_name = config["dataset"]["f_model_name"]


					# Model - use CPU (MPS has compatibility issues with adaptive pooling)
					device = "cpu"  # Use "cuda" for NVIDIA GPU
					feature_extractor = networks.get_model(model_name=config["dataset"]["model_name"], device=device)

					
					# Classifier
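					# Force a single hidden layer of width 1024, overriding whatever the YAML specifies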
					config["classifier"]["hidden_layers"] = [1024]
					classifier = networks.Classifier_Arch2(
						input_dim=config["classifier"]["input_dim"],
						hidden_layers=config["classifier"]["hidden_layers"]
					)

					# Log
					print(
						"\n",
						"Classifier:", "\n",
						classifier, "\n",
						"\n"
					)


					# Assertions
					for key in ["dataset_type", "model_name"]:
						assert key in config["dataset"], "{} not provided".format(key)

					
					# Image-Sources and Classes
					if config["dataset"]["dataset_type"] == "GenImage":
						# GenImage Dataset
						train_image_sources, test_image_sources = utils.get_GenImage_options()

					elif config["dataset"]["dataset_type"] == "UnivFD":
						# UnivFD Dataset
						train_image_sources, test_image_sources = utils.get_UnivFD_options()

					elif config["dataset"]["dataset_type"] == "DRCT":
						# DRCT Dataset
						train_image_sources, test_image_sources = utils.get_DRCT_options()

					else:
						raise ValueError("Invalid dataset_type: {}".format(config["dataset"]["dataset_type"]))
				
					
					# Log
					print(
						"\n",
						"Test-Settings:", "\n",
						" "*2, "dataset_type:", dataset_type, "\n",
						" "*2, "model_name:", model_name, "\n",
						" "*2, "f_model_name:", f_model_name, "\n",
						" "*2, "train_image_sources:", train_image_sources, "\n",
						" "*2, "test_image_sources:", test_image_sources, "\n",
						" "*2, "resume_dirname", config["checkpoints"]["resume_dirname"], "\n",
						" "*2, "best_threshold", best_threshold, "\n",
						"\n"
					)


					# Testing
					config["train_settings"]["train"] = False
					config["train_loss_fn"]["name"] = "CrossEntropy"
					config["val_loss_fn"]["name"] = "CrossEntropy"
					
					test_set_metrics, best_threshold, y_pred, y_true = run_on_images(
						feature_extractor=feature_extractor, 
						classifier=classifier, 
						config=config, 
						test_real_images_paths=test_real_images_paths,
						test_fake_images_paths=test_fake_images_paths,
						preprocess_settings=preprocess_settings,
						best_threshold=best_threshold,
						verbose=False
					)

					print(y_pred)


					# Saving Predictions (disabled; uncomment to dump paths/predictions/labels to misc/predictions.npy)
					"""
					predictions = []
					for i in range(len(test_real_images_paths)):
						predictions.append(
							[test_real_images_paths[i], str(y_pred[i]), str(y_true[i])]
						)
					np.save("misc/predictions.npy", predictions)
					"""