faryalnimra committed
Commit
616afb0
·
1 Parent(s): 8231bc1

Added nbackend folder

Files changed (2)
  1. app.py +407 -0
  2. requirements.txt +0 -0
app.py ADDED
@@ -0,0 +1,407 @@
from flask import Flask, request, jsonify, render_template, url_for
from flask_cors import CORS
import torch
import torch.nn as nn
from torchvision import models, transforms
from PIL import Image
from huggingface_hub import hf_hub_download
import os
import re  # used by is_valid_password below
from mtcnn import MTCNN
import cv2
from pymongo import MongoClient
import numpy as np
from werkzeug.security import generate_password_hash, check_password_hash
from werkzeug.utils import secure_filename
import logging
import matplotlib.pyplot as plt
import seaborn as sns
from transformers import AutoImageProcessor, AutoModelForImageClassification  # real/fake detector

# Setup logging
logging.basicConfig(level=logging.INFO)

app = Flask(__name__, template_folder="templates", static_folder="static")
CORS(app)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
UPLOAD_FOLDER = "static/uploads"
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# ------------------- Model Loading Functions -------------------

def load_model_from_hf(repo_id, filename, num_classes):
    """Download a ConvNeXt-Tiny checkpoint from the Hugging Face Hub and prepare it for inference."""
    model_path = hf_hub_download(repo_id=repo_id, filename=filename)
    model = models.convnext_tiny(weights=None)
    in_features = model.classifier[2].in_features
    model.classifier[2] = nn.Linear(in_features, num_classes)
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.to(device)
    model.eval()
    return model

# Load the existing deepfake/cheapfake models
deepfake_model = load_model_from_hf("faryalnimra/DFDC-detection-model", "DFDC.pth", 2)
cheapfake_model = load_model_from_hf("faryalnimra/ORIG-TAMP", "ORIG-TAMP.pth", 1)

# ------------------- New Real/Fake Detector Model -------------------
# Classifies an uploaded image as real or fake; the /predict route below treats
# label 1 as fake and label 0 as real.
model_name = "prithivMLmods/Deep-Fake-Detector-Model"
processor = AutoImageProcessor.from_pretrained(model_name, use_fast=False)
realfake_detector = AutoModelForImageClassification.from_pretrained(model_name)
realfake_detector.to(device)
realfake_detector.eval()

# ------------------- Image Preprocessing -------------------

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# ------------------- Face Detector -------------------

face_detector = MTCNN()

def detect_face(image_path):
    """Count faces detected with confidence above 0.90 and a box width above 30 px."""
    image = cv2.imread(image_path)
    if image is None:  # unreadable or missing file
        return 0
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    faces = face_detector.detect_faces(image_rgb)
    face_count = sum(
        1 for face in faces
        if face.get("confidence", 0) > 0.90 and face.get("box", [0, 0, 0, 0])[2] > 30
    )
    return face_count

# ------------------- API Endpoint: /predict -------------------
@app.route("/predict", methods=["POST"])
def predict():
    if "file" not in request.files:
        return jsonify({"error": "No file uploaded"}), 400

    file = request.files["file"]
    prediction_type = request.form.get("prediction_type", "real_vs_fake")  # default mode

    filename = secure_filename(file.filename)
    filepath = os.path.join(UPLOAD_FOLDER, filename)
    file.save(filepath)

    try:
        image = Image.open(filepath).convert("RGB")
        image_tensor = transform(image).unsqueeze(0).to(device)
    except Exception as e:
        return jsonify({"error": "Error processing image", "details": str(e)}), 500

    # --------- CASE 1: ONLY Real/Fake Prediction ----------
    if prediction_type == "real_vs_fake":
        with torch.no_grad():
            inputs = processor(images=image, return_tensors="pt").to(device)
            outputs_realfake = realfake_detector(**inputs)
            pred_label = torch.argmax(outputs_realfake.logits, dim=1).item()

        if pred_label == 1:
            return jsonify({
                "prediction": "Fake",
                "message": "Image is fake, but the type (Deepfake/Cheapfake) is not determined in this mode.",
                "image_url": url_for("static", filename=f"uploads/{filename}")
            })
        else:
            return jsonify({
                "prediction": "Real",
                "message": "Image is authentic. No further processing.",
                "image_url": url_for("static", filename=f"uploads/{filename}")
            })

    # --------- CASE 2: Deepfake vs Cheapfake Analysis ----------
    elif prediction_type == "deepfake_vs_cheapfake":
        with torch.no_grad():
            deepfake_probs = torch.softmax(deepfake_model(image_tensor), dim=1)[0]
            deepfake_confidence_before = deepfake_probs[1].item() * 100
            cheapfake_confidence_before = torch.sigmoid(cheapfake_model(image_tensor)).item() * 100

        # More detected faces nudge the decision toward "Deepfake".
        face_count = detect_face(filepath)
        face_factor = min(face_count / 2, 1)

        if deepfake_confidence_before <= cheapfake_confidence_before:
            adjusted_deepfake_confidence = deepfake_confidence_before * (1 + 0.3 * face_factor)
            adjusted_cheapfake_confidence = cheapfake_confidence_before * (1 - 0.3 * face_factor)
        else:
            adjusted_deepfake_confidence = deepfake_confidence_before
            adjusted_cheapfake_confidence = cheapfake_confidence_before

        fake_type = "Deepfake" if adjusted_deepfake_confidence > adjusted_cheapfake_confidence else "Cheapfake"

        return jsonify({
            "prediction": "Fake",
            "fake_type": fake_type,
            "deepfake_confidence_before": f"{deepfake_confidence_before:.2f}%",
            "deepfake_confidence_adjusted": f"{adjusted_deepfake_confidence:.2f}%",
            "cheapfake_confidence_before": f"{cheapfake_confidence_before:.2f}%",
            "cheapfake_confidence_adjusted": f"{adjusted_cheapfake_confidence:.2f}%",
            "faces_detected": face_count,
            "image_url": url_for("static", filename=f"uploads/{filename}")
        })

    # --------- CASE 3: Invalid prediction_type ---------
    else:
        return jsonify({"error": "Invalid prediction_type. Use 'real_vs_fake' or 'deepfake_vs_cheapfake'."}), 400

# ------------------- Heatmap Generator and API -------------------

# Heatmap output folder and allowed upload types (UPLOAD_FOLDER is defined above).
HEATMAP_FOLDER = "static/heatmaps"
ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg"}

os.makedirs(HEATMAP_FOLDER, exist_ok=True)

def allowed_file(filename):
    return "." in filename and filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS

# Grad-CAM runs on the deepfake model loaded above (already in eval mode).
deepfake_model.eval()

# Choose the last Conv2D layer as the Grad-CAM target
target_layer = None
for name, module in deepfake_model.named_modules():
    if isinstance(module, torch.nn.Conv2d):
        target_layer = module

# Grad-CAM class
class GradCAM:
    """Computes a class-activation map from the gradients flowing into the target layer."""

    def __init__(self, model, target_layer):
        self.model = model
        self.target_layer = target_layer
        self.gradients = None
        self.activations = None
        self._register_hooks()

    def _register_hooks(self):
        def forward_hook(module, input, output):
            self.activations = output.detach()

        def backward_hook(module, grad_in, grad_out):
            self.gradients = grad_out[0].detach()

        self.target_layer.register_forward_hook(forward_hook)
        self.target_layer.register_full_backward_hook(backward_hook)

    def generate(self, input_tensor, class_idx=None):
        self.model.eval()
        output = self.model(input_tensor)

        if class_idx is None:
            class_idx = torch.argmax(output, dim=1).item()

        self.model.zero_grad()
        loss = output[0, class_idx]
        loss.backward()

        gradients = self.gradients.cpu().numpy()[0]
        activations = self.activations.cpu().numpy()[0]

        # Weight each activation channel by its average gradient, then combine.
        weights = np.mean(gradients, axis=(1, 2))
        cam = np.zeros(activations.shape[1:], dtype=np.float32)
        for i, w in enumerate(weights):
            cam += w * activations[i, :, :]

        # ReLU, resize to the input resolution, and normalise to [0, 1].
        cam = np.maximum(cam, 0)
        cam = cv2.resize(cam, (input_tensor.size(3), input_tensor.size(2)))
        cam = cam - np.min(cam)
        cam = cam / (np.max(cam) + 1e-8)
        return cam, output

# Preprocessing for Grad-CAM inputs
preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

gradcam = GradCAM(deepfake_model, target_layer)

# Generate heatmap and prediction
def generate_heatmap(original_image_path, heatmap_save_path):
    img = Image.open(original_image_path).convert("RGB")
    input_tensor = preprocess(img).unsqueeze(0).to(device)

    cam, output = gradcam.generate(input_tensor)

    # Get prediction
    probabilities = torch.nn.functional.softmax(output, dim=1)[0]
    class_idx = torch.argmax(probabilities).item()
    confidence = probabilities[class_idx].item()
    label = "Fake" if class_idx == 1 else "Real"

    # Colour-map the CAM, blur it slightly, and blend it with the resized input image
    heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
    heatmap = cv2.GaussianBlur(heatmap, (7, 7), 0)
    heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)

    img_np = np.array(img.resize((224, 224)))
    superimposed_img = heatmap * 0.5 + img_np * 0.5
    superimposed_img = np.uint8(superimposed_img)

    Image.fromarray(superimposed_img).save(heatmap_save_path)

    return label, confidence

# Flask route
@app.route("/generate_heatmap", methods=["POST"])
def generate_heatmap_api():
    if "file" not in request.files:
        return jsonify({"error": "No file uploaded"}), 400

    file = request.files["file"]

    if file.filename == "" or not allowed_file(file.filename):
        return jsonify({"error": "Invalid file type. Allowed types are .png, .jpg, .jpeg"}), 400

    filename = secure_filename(file.filename)
    original_image_path = os.path.join(UPLOAD_FOLDER, filename)

    try:
        file.save(original_image_path)
    except Exception as e:
        logging.error(f"Failed to save uploaded file: {e}")
        return jsonify({"error": "Failed to save the file"}), 500

    heatmap_filename = f"heatmap_{filename}"
    heatmap_path = os.path.join(HEATMAP_FOLDER, heatmap_filename)

    label, confidence = generate_heatmap(original_image_path, heatmap_path)

    return jsonify({
        "original_image_url": url_for("static", filename=f"uploads/{filename}", _external=True),
        "heatmap_image_url": url_for("static", filename=f"heatmaps/{heatmap_filename}", _external=True),
        "prediction": label,
        "confidence": f"{confidence:.2f}"
    })

# To run:
# if __name__ == "__main__":
#     app.run(debug=True)

# ------------------- MongoDB Atlas: Auth & Contact Routes -------------------

# MongoDB connection (prefer the MONGO_URI environment variable over a hard-coded string)
MONGO_URI = os.environ.get(
    "MONGO_URI",
    'mongodb+srv://fakecatcherai:[email protected]/?retryWrites=true&w=majority&appName=Cluster0'
)
client = MongoClient(MONGO_URI)
db = client['fakecatcherDB']
users_collection = db['users']
contacts_collection = db['contacts']

def is_valid_password(password):
    """Require at least 8 characters with uppercase, lowercase, digit, and special character."""
    if (len(password) < 8 or
            not re.search(r'[A-Z]', password) or
            not re.search(r'[a-z]', password) or
            not re.search(r'[0-9]', password) or
            not re.search(r'[!@#$%^&*(),.?":{}|<>]', password)):
        return False
    return True

# 🟢 Register Route (React Page: Register)
@app.route('/Register', methods=['POST'])
def register():
    data = request.get_json()
    first_name = data.get('firstName')
    last_name = data.get('lastName')
    email = data.get('email')
    password = data.get('password')

    # Check if all fields are provided
    if not all([first_name, last_name, email, password]):
        return jsonify({'message': 'All fields are required!'}), 400

    if users_collection.find_one({'email': email}):
        logging.warning(f"Attempted register with existing email: {email}")
        return jsonify({'message': 'Email already exists!'}), 400

    # ✅ Password constraints check
    if not is_valid_password(password):
        return jsonify({'message': 'Password must be at least 8 characters long and include uppercase, lowercase, number, and special character.'}), 400

    hashed_pw = generate_password_hash(password)
    users_collection.insert_one({
        'first_name': first_name,
        'last_name': last_name,
        'email': email,
        'password': hashed_pw
    })

    logging.info(f"New user registered: {first_name} {last_name}, Email: {email}")
    return jsonify({'message': 'Registration successful!'}), 201

# 🔵 Login Route
@app.route('/Login', methods=['POST'])
def login():
    data = request.get_json()
    email = data.get('email')
    password = data.get('password')

    # Check if the user exists
    user = users_collection.find_one({'email': email})
    if not user or not check_password_hash(user['password'], password):
        logging.warning(f"Failed login attempt for email: {email}")
        return jsonify({'message': 'Invalid email or password!'}), 401

    logging.info(f"User logged in successfully: {email}")
    return jsonify({'message': 'Login successful!'}), 200

@app.route('/ForgotPassword', methods=['POST'])
def forgot_password():
    data = request.get_json()
    email = data.get('email')
    new_password = data.get('newPassword')
    confirm_password = data.get('confirmPassword')

    # Check if passwords match
    if new_password != confirm_password:
        logging.warning(f"Password reset failed. Passwords do not match for email: {email}")
        return jsonify({'message': 'Passwords do not match!'}), 400

    # Check if the user exists
    user = users_collection.find_one({'email': email})
    if not user:
        logging.warning(f"Password reset attempt for non-existent email: {email}")
        return jsonify({'message': 'User not found!'}), 404

    # Hash the new password and update it
    hashed_pw = generate_password_hash(new_password)
    users_collection.update_one({'email': email}, {'$set': {'password': hashed_pw}})

    logging.info(f"Password successfully reset for email: {email}")
    return jsonify({'message': 'Password updated successfully!'}), 200

# 🟣 Contact Form Route (React Page: Contact)
@app.route('/Contact', methods=['POST'])
def contact():
    data = request.get_json()
    email = data.get('email')
    query = data.get('query')
    message = data.get('message')

    # Check if all fields are provided
    if not email or not query or not message:
        logging.warning(f"Incomplete contact form submission from email: {email}")
        return jsonify({'message': 'All fields are required!'}), 400

    # Insert the contact data
    contact_data = {
        'email': email,
        'query': query,
        'message': message
    }
    contacts_collection.insert_one(contact_data)

    logging.info(f"Contact form submitted successfully from email: {email}")
    return jsonify({'message': 'Your message has been sent successfully.'}), 200

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860, debug=True)
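
For reference, a minimal client sketch for the image endpoints defined above: /predict (both prediction_type modes) and /generate_heatmap. The base URL assumes the local development server started by the __main__ block on port 7860, and test.jpg is a placeholder image path; both are assumptions, not part of the committed code.

# Hypothetical client calls for the endpoints above; the base URL and
# image path are assumptions used only for illustration.
import requests

BASE_URL = "http://localhost:7860"  # assumption: local dev server from the __main__ block
IMAGE_PATH = "test.jpg"             # assumption: any local .png/.jpg/.jpeg file

# Real vs. fake check (the default prediction_type)
with open(IMAGE_PATH, "rb") as f:
    resp = requests.post(f"{BASE_URL}/predict",
                         files={"file": f},
                         data={"prediction_type": "real_vs_fake"})
print(resp.json())

# Deepfake vs. cheapfake analysis
with open(IMAGE_PATH, "rb") as f:
    resp = requests.post(f"{BASE_URL}/predict",
                         files={"file": f},
                         data={"prediction_type": "deepfake_vs_cheapfake"})
print(resp.json())

# Grad-CAM heatmap for the same image
with open(IMAGE_PATH, "rb") as f:
    resp = requests.post(f"{BASE_URL}/generate_heatmap", files={"file": f})
print(resp.json())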
requirements.txt ADDED
Binary file (1.07 kB).
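
Along the same lines, a hedged sketch of client calls for the account and contact routes in app.py. The JSON field names match the data.get() keys in the handlers; the base URL and all sample values (email, password, message) are illustrative assumptions.

# Hypothetical client calls for the /Register, /Login, and /Contact routes;
# the base URL and every sample value below are assumptions for illustration.
import requests

BASE_URL = "http://localhost:7860"  # assumption: local dev server

# Register a new account (field names match the /Register handler)
resp = requests.post(f"{BASE_URL}/Register", json={
    "firstName": "Jane",
    "lastName": "Doe",
    "email": "jane@example.com",
    "password": "Str0ng!Pass",  # satisfies is_valid_password
})
print(resp.status_code, resp.json())

# Log in with the same credentials
resp = requests.post(f"{BASE_URL}/Login", json={
    "email": "jane@example.com",
    "password": "Str0ng!Pass",
})
print(resp.status_code, resp.json())

# Submit the contact form
resp = requests.post(f"{BASE_URL}/Contact", json={
    "email": "jane@example.com",
    "query": "Feedback",
    "message": "The heatmap view is helpful.",
})
print(resp.status_code, resp.json())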