Anshu13 committed (verified)
Commit 4c3fc5c · 1 Parent(s): 4fbadf6

Upload 6 files

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ Examples/WhatsApp[[:space:]]Video[[:space:]]2025-06-03[[:space:]]at[[:space:]]21.40.27_ab5a54b6.mp4 filter=lfs diff=lfs merge=lfs -text
+ Examples/WhatsApp[[:space:]]Video[[:space:]]2025-06-04[[:space:]]at[[:space:]]21.35.45_a5842999.mp4 filter=lfs diff=lfs merge=lfs -text
Examples/WhatsApp Video 2025-06-03 at 21.40.27_ab5a54b6.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b18a80d642e9a6ebdfe25cab81c7804301dd0a849b9f377cd287f5b47592d80
+ size 1778559
Examples/WhatsApp Video 2025-06-04 at 21.35.45_a5842999.mp4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:47a5ff02a2e8336c4b1cee2c31ab3063fc0557942245c03398083cdd3dcc9643
+ size 1607622
app.py ADDED
@@ -0,0 +1,57 @@
+ import os
+
+ import cv2
+ import gradio as gr
+ import torch
+ from PIL import Image
+
+ from model import create_model
+
+ # Build the MobileNetV3 model and its preprocessing transform, then load the
+ # fine-tuned checkpoint committed alongside this file.
+ model, transform = create_model(num_of_classes=3)
+ model.load_state_dict(torch.load("fire_smoke_weight.pth", map_location="cpu"))
+ model.eval()
+
+ def classify_video(video):
+     """Classify every frame of the video and report whether fire and/or smoke was spotted."""
+     cap = cv2.VideoCapture(video)
+     predictions = []
+     fire, smoke, default = [], [], []
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+         # OpenCV decodes frames as BGR; convert to RGB before the torchvision transform.
+         img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+         img_pil = Image.fromarray(img)
+         img_tensor = transform(img_pil).unsqueeze(0)
+         with torch.no_grad():
+             output = model(img_tensor)
+             pred = output.argmax().item()
+         predictions.append(pred)
+     cap.release()
+     class_names = ['DEFAULT', 'FIRE', 'SMOKE']
+     for i in predictions:
+         if i == 1:
+             fire.append(i)
+         elif i == 2:
+             smoke.append(i)
+         else:
+             default.append(i)
+     if fire and smoke:
+         return f"Spotted {class_names[1]} and {class_names[2]}"
+     elif fire:
+         return f"Spotted {class_names[1]}"
+     elif smoke:
+         return f"Spotted {class_names[2]}"
+     else:
+         return f"Spotted {class_names[0]}"
+
+ description = "A MobileNet model trained to classify fire and smoke in videos"
+ article = "Created in a Jupyter Notebook with an NVIDIA GeForce MX350 GPU"
+ example_list = [["Examples/" + example] for example in os.listdir("Examples") if example.endswith((".mp4", ".avi", ".mov"))]
+
+ gr.Interface(
+     fn=classify_video,
+     inputs=gr.Video(streaming=True),
+     outputs="text",
+     title="Fire and Smoke Classifier",
+     examples=example_list,
+     description=description,
+     article=article,
+     live=True,
+ ).launch()
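For a quick check of the per-frame classifier outside the Gradio UI (importing app.py would call launch() immediately), the loop below is a minimal stand-alone sketch. It assumes it is run from the repository root, that fire_smoke_weight.pth is a plain state dict matching the architecture returned by create_model, and it uses one of the example clips committed above.

    import cv2
    import torch
    from PIL import Image

    from model import create_model

    # Rebuild the architecture and load the committed checkpoint (assumed to be a plain state dict).
    model, transform = create_model(num_of_classes=3)
    model.load_state_dict(torch.load("fire_smoke_weight.pth", map_location="cpu"))
    model.eval()

    video_path = "Examples/WhatsApp Video 2025-06-03 at 21.40.27_ab5a54b6.mp4"
    class_names = ["DEFAULT", "FIRE", "SMOKE"]
    counts = {name: 0 for name in class_names}

    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        tensor = transform(Image.fromarray(frame_rgb)).unsqueeze(0)
        with torch.no_grad():
            pred = model(tensor).argmax().item()
        counts[class_names[pred]] += 1
    cap.release()

    print(counts)  # per-frame vote counts for DEFAULT / FIRE / SMOKE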
fire_smoke_weight.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc7e9918c5dfaf3f7a790f0923c3cf59b42403571628641bad04b75f7fef8bde
+ size 22153066
model.py ADDED
@@ -0,0 +1,16 @@
+ import torchvision
+ from torch import nn
+
+ def create_model(num_of_classes: int = 3):
+     """Return a MobileNetV3-Large with a new num_of_classes-way head, plus its pretrained transform."""
+     weights = torchvision.models.MobileNet_V3_Large_Weights.DEFAULT
+     transform = weights.transforms()
+     model = torchvision.models.mobilenet_v3_large(weights=weights)
+     # Freeze the whole network, then re-enable gradients for the classifier
+     # and the last six feature blocks so only they are fine-tuned.
+     for parameter in model.parameters():
+         parameter.requires_grad = False
+     for parameter in model.classifier[-4:].parameters():
+         parameter.requires_grad = True
+     for parameter in model.features[-6:].parameters():
+         parameter.requires_grad = True
+     # Replace the final ImageNet layer (1280 -> 1000) with a small head ending in num_of_classes outputs.
+     model.classifier[3] = nn.Sequential(
+         nn.Linear(1280, 1000),
+         nn.ReLU(),
+         nn.Dropout(p=0.3),
+         nn.Linear(1000, num_of_classes),
+     )
+     return model, transform
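create_model freezes the pretrained backbone and re-enables gradients only for the classifier and the last six feature blocks, so only part of the network is updated during fine-tuning. The sketch below is not part of this commit; it merely inspects the returned model to confirm that split and the num_of_classes-sized output.

    import torch

    from model import create_model

    model, transform = create_model(num_of_classes=3)

    # Count which parameters will receive gradients after the freeze/unfreeze logic above.
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    print(f"trainable parameters: {trainable:,} / {total:,}")

    # The replaced head should emit one logit per class (DEFAULT, FIRE, SMOKE).
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 3])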
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ torch==2.5.1
+ torchvision==0.20.1
+ gradio==5.29.1
+ opencv-python==4.11.0.86
+ Pillow==10.2.0
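A small, optional check that a local environment matches these pins; the distribution names below mirror requirements.txt (opencv-python is the package that provides the cv2 module imported in app.py):

    from importlib.metadata import version

    # Distribution names as listed in requirements.txt.
    for package in ("torch", "torchvision", "gradio", "opencv-python", "Pillow"):
        print(package, version(package))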