Duplicate from akhaliq/VideoMAE
Co-authored-by: Ahsen Khaliq <akhaliq@users.noreply.huggingface.co>
- .gitattributes +31 -0
- README.md +13 -0
- app.py +52 -0
- requirements.txt +4 -0
.gitattributes
ADDED
@@ -0,0 +1,31 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
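These patterns route large binary artifacts (model weights, archives, tensor dumps) through Git LFS, so the repository stores lightweight pointers instead of the files themselves. As a rough illustration only, not part of this commit, and with the caveat that Python's fnmatch only approximates gitattributes glob matching (it does not handle saved_model/**/*, for instance), here is a sketch of which filenames the simpler patterns would catch:

from fnmatch import fnmatch

# Approximate a subset of the .gitattributes globs above (illustrative only).
lfs_patterns = ["*.7z", "*.bin", "*.h5", "*.onnx", "*.pt", "*.pth", "*.zip", "*tfevents*"]

for name in ["pytorch_model.bin", "app.py", "events.out.tfevents.0", "clip.zip"]:
    tracked = any(fnmatch(name, pat) for pat in lfs_patterns)
    print(f"{name} -> {'LFS pointer' if tracked else 'stored in git'}")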
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: VideoMAE
+emoji: 💩
+colorFrom: pink
+colorTo: pink
+sdk: gradio
+sdk_version: 3.1.7
+app_file: app.py
+pinned: false
+duplicated_from: akhaliq/VideoMAE
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
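The YAML front matter is the Space's configuration: it sets the display title and emoji, pins the SDK to Gradio 3.1.7, points the runtime at app.py, and records the source Space via duplicated_from. A minimal sketch of reading it yourself (illustrative; it assumes PyYAML, which this Space's own requirements do not include):

import yaml  # assumption: PyYAML is available; not in this Space's requirements.txt

with open("README.md") as f:
    # The front matter sits between the first pair of '---' delimiters.
    _, front_matter, _ = f.read().split("---", 2)

config = yaml.safe_load(front_matter)
print(config["sdk"], config["sdk_version"], config["app_file"])
# -> gradio 3.1.7 app.py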
app.py
ADDED
@@ -0,0 +1,52 @@
+from decord import VideoReader, cpu
+import torch
+import numpy as np
+
+from transformers import VideoMAEFeatureExtractor, VideoMAEForVideoClassification
+from huggingface_hub import hf_hub_download
+import gradio as gr
+
+np.random.seed(0)
+
+def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
+    converted_len = int(clip_len * frame_sample_rate)
+    end_idx = np.random.randint(converted_len, seg_len)
+    start_idx = end_idx - converted_len
+    indices = np.linspace(start_idx, end_idx, num=clip_len)
+    indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
+    return indices
+
+
+def inference(file_path):
+    # video clip consists of 300 frames (10 seconds at 30 FPS)
+    videoreader = VideoReader(file_path, num_threads=1, ctx=cpu(0))
+
+    # sample 16 frames
+    videoreader.seek(0)
+    indices = sample_frame_indices(clip_len=16, frame_sample_rate=4, seg_len=len(videoreader))
+    video = videoreader.get_batch(indices).asnumpy()
+
+    feature_extractor = VideoMAEFeatureExtractor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
+    model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
+
+    inputs = feature_extractor(list(video), return_tensors="pt")
+
+    with torch.no_grad():
+        outputs = model(**inputs)
+        logits = outputs.logits
+
+    # model predicts one of the 400 Kinetics-400 classes
+    predicted_label = logits.argmax(-1).item()
+    return model.config.id2label[predicted_label]
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        with gr.Column():
+            video = gr.Video()
+            btn = gr.Button(value="Run")
+        with gr.Column():
+            label = gr.Textbox(label="Predicted Label")
+
+    btn.click(inference, inputs=video, outputs=label)
+
+demo.launch()
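Two things are worth noting about app.py as committed: hf_hub_download is imported but never used, and inference() calls from_pretrained on every button click, so the feature extractor and model are re-instantiated per request. (Also, sample_frame_indices raises if the video has fewer than clip_len * frame_sample_rate = 64 frames, since np.random.randint needs a non-empty range.) A common variant, sketched here rather than taken from the commit, hoists the loading to module scope so it happens once at startup:

import torch
from transformers import VideoMAEFeatureExtractor, VideoMAEForVideoClassification

# Sketch (not the committed code): load once at import time instead of
# inside the click handler; same checkpoint as app.py uses.
MODEL_ID = "MCG-NJU/videomae-base-finetuned-kinetics"
feature_extractor = VideoMAEFeatureExtractor.from_pretrained(MODEL_ID)
model = VideoMAEForVideoClassification.from_pretrained(MODEL_ID)
model.eval()

def classify(frames):
    # frames: a list of H x W x 3 uint8 arrays, e.g. the 16 sampled frames above
    inputs = feature_extractor(frames, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]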
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+decord
+transformers
+gradio
+torch
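All four dependencies are unpinned, so transformers, torch, and decord resolve to whatever is current at build time; the gradio entry is effectively redundant on Spaces, where the README's sdk_version: 3.1.7 controls the installed version. For reproducible rebuilds a pinned variant would help, e.g. transformers==4.22.0 rather than bare transformers (version illustrative, not from the commit); note that the VideoMAE classes were only added to transformers around v4.22, so older releases will fail the import in app.py.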