heffnt committed
Commit 1fea2ad · 0 parents

Add initial project files including Gradio app, Git LFS configuration, and CI workflows


- Created .gitattributes for Git LFS tracking of image and model files.
- Added .gitignore to exclude common Python artifacts.
- Implemented the main Gradio chatbot application in app.py.
- Included README.md with project details and dependencies.
- Specified required packages in requirements.txt.
- Set up GitHub Actions workflows for Discord notifications, Hugging Face synchronization, and testing.
- Added image assets for the chatbot background and avatar icons.
- Created a smoke test in tests/test_smoke.py to validate API token usage.

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ # Git LFS tracking for common image formats
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ *.svg filter=lfs diff=lfs merge=lfs -text
+ *.ico filter=lfs diff=lfs merge=lfs -text
+
+ # Other large files you might want to track
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.pdf filter=lfs diff=lfs merge=lfs -text
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
+ *.mov filter=lfs diff=lfs merge=lfs -text
+ *.avi filter=lfs diff=lfs merge=lfs -text
+
+ # Model files (common in ML projects)
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
.github/workflows/discord-notification.yml ADDED
@@ -0,0 +1,73 @@
+ name: Discord Notification
+
+ on:
+   push:
+   pull_request:
+   issues:
+   release:
+   create:
+   delete:
+   workflow_run:
+     workflows:
+       - "Sync to Hugging Face hub"
+       - "Run chatbot test"
+     types:
+       - completed
+
+ jobs:
+   notify:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Send Discord notification
+         env:
+           DISCORD_WEBHOOK_URL: ${{ secrets.DISCORD_WEBHOOK_URL }} # Set in GitHub Secrets
+         run: |
+           EVENT_NAME="${{ github.event_name }}"
+
+           case "$EVENT_NAME" in
+             push)
+               SHORT_SHA="${{ github.sha }}"
+               SHORT_SHA="${SHORT_SHA:0:7}"
+               MESSAGE="Commit ($SHORT_SHA) was pushed to ${{ github.ref_name }} branch by $GITHUB_ACTOR: ${{ github.event.head_commit.message }}"
+               ;;
+             pull_request)
+               MESSAGE="Pull request #${{ github.event.pull_request.number }} ${{ github.event.action }} by $GITHUB_ACTOR: ${{ github.event.pull_request.title }} (from ${{ github.event.pull_request.head.ref }} to ${{ github.event.pull_request.base.ref }})"
+               ;;
+             issues)
+               MESSAGE="Issue #${{ github.event.issue.number }} ${{ github.event.action }} by $GITHUB_ACTOR: ${{ github.event.issue.title }}"
+               ;;
+             release)
+               MESSAGE="Release ${{ github.event.release.tag_name }} ${{ github.event.action }} by $GITHUB_ACTOR: ${{ github.event.release.name }}"
+               ;;
+             create)
+               MESSAGE="Created ${{ github.event.ref_type }} ${{ github.event.ref }} by $GITHUB_ACTOR"
+               ;;
+             delete)
+               MESSAGE="Deleted ${{ github.event.ref_type }} ${{ github.event.ref }} by $GITHUB_ACTOR"
+               ;;
+             workflow_run)
+               NAME="${{ github.event.workflow_run.name }}"
+               CONCLUSION="${{ github.event.workflow_run.conclusion }}"
+               BRANCH="${{ github.event.workflow_run.head_branch }}"
+               SHA="${{ github.event.workflow_run.head_sha }}"
+
+               if [ "$CONCLUSION" = "success" ]; then
+                 STATUS="PASSED"
+               elif [ "$CONCLUSION" = "failure" ]; then
+                 STATUS="FAILED"
+               else
+                 STATUS="$CONCLUSION"
+               fi
+
+               SHORT_SHA="${SHA:0:7}"
+               MESSAGE="Workflow $NAME $STATUS on $BRANCH (commit $SHORT_SHA)"
+               ;;
+             *)
+               MESSAGE="Event $EVENT_NAME by $GITHUB_ACTOR on ${{ github.repository }} at ${{ github.ref_name }}"
+               ;;
+           esac
+
+           curl -X POST -H "Content-Type: application/json" \
+             -d "{\"content\": \"$MESSAGE\"}" \
+             "$DISCORD_WEBHOOK_URL"
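Since the notification step is a single curl call, the payload format can be checked locally before the webhook secret is wired up. A minimal sketch (a hypothetical helper, not part of this commit; it assumes DISCORD_WEBHOOK_URL is exported in your shell):

import json
import os
import urllib.request

# Post a test message using the same {"content": ...} payload the workflow sends.
payload = json.dumps({"content": "Test message from local machine"}).encode("utf-8")
req = urllib.request.Request(
    os.environ["DISCORD_WEBHOOK_URL"],
    data=payload,
    headers={"Content-Type": "application/json"},
)
urllib.request.urlopen(req)  # Discord replies 204 No Content on success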
.github/workflows/hf-sync.yml ADDED
@@ -0,0 +1,19 @@
+ name: Sync to Hugging Face hub
+ on:
+   push:
+
+   # To run this workflow manually from the Actions tab
+   workflow_dispatch:
+
+ jobs:
+   sync-to-hub:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v3
+         with:
+           fetch-depth: 0
+           lfs: true
+       - name: Push to hub
+         env:
+           HF_TOKEN: ${{ secrets.Smart_Confidant_Token }}
+         run: git push --force https://heffnt:$HF_TOKEN@huggingface.co/spaces/heffnt/Smart_Confidant HEAD:main
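Before trusting the force push above, it may be worth confirming that the token stored in Smart_Confidant_Token actually authenticates. A pre-flight sketch (assumes huggingface_hub is installed and the token is exported as HF_TOKEN; not part of this commit):

import os
from huggingface_hub import HfApi

# whoami() fails fast with a clear error if the token is invalid or expired.
api = HfApi(token=os.environ["HF_TOKEN"])
print(api.whoami()["name"])  # the account the token belongs to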
.github/workflows/test.yml ADDED
@@ -0,0 +1,25 @@
+ name: Run chatbot test
+
+ on:
+   push:
+   pull_request:
+
+ jobs:
+   test:
+     runs-on: ubuntu-latest
+
+     steps:
+       - uses: actions/checkout@v4
+
+       - uses: actions/setup-python@v5
+         with:
+           python-version: "3.11"
+
+       - name: Install dependencies
+         run: |
+           pip install pytest "gradio[oauth]" huggingface_hub
+
+       - name: Run pytest with HF token
+         env:
+           HF_TOKEN: ${{ secrets.Smart_Confidant_Token }}
+         run: pytest -q
.gitignore ADDED
@@ -0,0 +1,8 @@
+ __pycache__/
+ *.py[cod]
+ *.pyo
+ *.pyd
+ *.db
+ *.sqlite3
+ *.log
+ *.env
README.md ADDED
@@ -0,0 +1,17 @@
+ ---
+ title: CSDS553 Demo
+ emoji: 💬
+ colorFrom: yellow
+ colorTo: purple
+ sdk: gradio
+ sdk_version: 5.44.1
+ app_file: app.py
+ pinned: false
+ hf_oauth: true
+ hf_oauth_scopes:
+   - inference-api
+ ---
+
+ An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
+
+ Increment this to force push to GitHub: 1243
app.py ADDED
@@ -0,0 +1,170 @@
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+ import os
+ import base64
+ from pathlib import Path
+
+ # Configuration
+ LOCAL_MODELS = ["tiiuae/Falcon-H1-0.5B-Instruct"]
+ API_MODELS = ["openai/gpt-oss-20b"]
+ DEFAULT_SYSTEM_MESSAGE = "You are an expert assistant for Magic: The Gathering. Your name is Smart Confidant, but people tend to call you Bob."
+ TITLE = "🎓🧙🏻‍♂️ Smart Confidant 🧙🏻‍♂️🎓"
+
+ # Create model options with labels
+ MODEL_OPTIONS = []
+ for model in LOCAL_MODELS:
+     MODEL_OPTIONS.append(f"{model} (local)")
+ for model in API_MODELS:
+     MODEL_OPTIONS.append(f"{model} (api)")
+
+ pipe = None
+ stop_inference = False
+
+ ASSETS_DIR = Path(__file__).parent / "assets"
+ BACKGROUND_IMAGE_PATH = ASSETS_DIR / "confidant_pattern.png"
+ try:
+     with open(BACKGROUND_IMAGE_PATH, "rb") as _img_f:
+         _encoded_img = base64.b64encode(_img_f.read()).decode("ascii")
+     BACKGROUND_DATA_URL = f"data:image/png;base64,{_encoded_img}"
+ except Exception as e:
+     print(f"Error loading background image: {e}")
+     BACKGROUND_DATA_URL = ""
+
+ # Fancy styling
+ fancy_css = f"""
+ html, body, #root {{
+     background-image: url('{BACKGROUND_DATA_URL}');
+     background-repeat: repeat;
+     background-size: auto;
+     background-color: transparent;
+ }}
+ .gradio-container {{
+     max-width: 700px;
+     margin: 0 auto;
+     padding: 20px;
+     background-color: #2d2d2d;
+     box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+     border-radius: 10px;
+     font-family: 'Arial', sans-serif;
+ }}
+ .gr-button {{
+     background-color: #4CAF50;
+     color: white;
+     border: none;
+     border-radius: 5px;
+     padding: 10px 20px;
+     cursor: pointer;
+     transition: background-color 0.3s ease;
+ }}
+ .gr-button:hover {{
+     background-color: #45a049;
+ }}
+ .gr-slider input {{
+     color: #4CAF50;
+ }}
+ .gr-chat {{
+     font-size: 16px;
+ }}
+ #title {{
+     text-align: center;
+     font-size: 2em;
+     margin-bottom: 20px;
+     color: #333;
+ }}
+ """
+
+ def respond(
+     message,
+     history: list[dict[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+     hf_token: gr.OAuthToken,
+     selected_model: str,
+ ):
+     global pipe
+
+     # Build messages from history
+     messages = [{"role": "system", "content": system_message}]
+     messages.extend(history)
+     messages.append({"role": "user", "content": message})
+
+     # Determine if model is local or API and extract model name
+     is_local = selected_model.endswith("(local)")
+     model_name = selected_model.replace(" (local)", "").replace(" (api)", "")
+
+     response = ""
+
+     if is_local:
+         print(f"[MODE] local - {model_name}")
+         from transformers import pipeline
+         import torch
+         if pipe is None or pipe.model.name_or_path != model_name:
+             pipe = pipeline("text-generation", model=model_name)
+
+         # Build prompt as plain text
+         prompt = "\n".join([f"{m['role']}: {m['content']}" for m in messages])
+
+         outputs = pipe(
+             prompt,
+             max_new_tokens=max_tokens,
+             do_sample=True,
+             temperature=temperature,
+             top_p=top_p,
+         )
+
+         response = outputs[0]["generated_text"][len(prompt):]
+         yield response.strip()
+
+     else:
+         print(f"[MODE] api - {model_name}")
+
+         if hf_token is None or not getattr(hf_token, "token", None):
+             yield "⚠️ Please log in with your Hugging Face account first."
+             return
+
+         client = InferenceClient(token=hf_token.token, model=model_name)
+
+         for chunk in client.chat_completion(
+             messages,
+             max_tokens=max_tokens,
+             stream=True,
+             temperature=temperature,
+             top_p=top_p,
+         ):
+             choices = chunk.choices
+             token = ""
+             if len(choices) and choices[0].delta.content:
+                 token = choices[0].delta.content
+             response += token
+             yield response
+
+
+ with gr.Blocks(css=fancy_css) as demo:
+     gr.LoginButton()
+     gr.Markdown(f"<h1 style='text-align: center;'>{TITLE}</h1>")
+
+     # Create custom chatbot with avatar images
+     chatbot = gr.Chatbot(
+         type="messages",
+         avatar_images=(str(ASSETS_DIR / "monster_icon.png"), str(ASSETS_DIR / "smart_confidant_icon.png"))
+     )
+
+     # Create ChatInterface, using the built-in accordion for additional inputs
+     gr.ChatInterface(
+         fn=respond,
+         chatbot=chatbot,
+         additional_inputs=[
+             gr.Textbox(value=DEFAULT_SYSTEM_MESSAGE, label="System message"),
+             gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+             gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
+             gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+             gr.Radio(choices=MODEL_OPTIONS, label="Select Model", value=MODEL_OPTIONS[0]),
+         ],
+         additional_inputs_accordion="Additional Settings",
+         type="messages",
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
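Because respond is a plain generator, it can be exercised outside Gradio. A local-mode sketch (hypothetical; it assumes transformers and torch are installed and will download the Falcon model on first run; hf_token is only read on the API path):

import app

gen = app.respond(
    message="Hi",
    history=[],
    system_message=app.DEFAULT_SYSTEM_MESSAGE,
    max_tokens=16,
    temperature=0.7,
    top_p=0.95,
    hf_token=None,  # only read on the API path, so None is safe here
    selected_model="tiiuae/Falcon-H1-0.5B-Instruct (local)",
)
print(next(gen))  # local mode yields exactly once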
assets/confidant_pattern.png ADDED

Git LFS Details

  • SHA256: 4c14a84c15babfeba7d7163d9d8c00533cca85f35ad2c0766c0c00f886b27897
  • Pointer size: 130 Bytes
  • Size of remote file: 10.1 kB
assets/monster_icon.png ADDED

Git LFS Details

  • SHA256: 39fc86d8274065c584d941b6e808d333b8f1dd4146966d95499eb3a98cf60c43
  • Pointer size: 130 Bytes
  • Size of remote file: 52.8 kB
assets/smart_confidant_icon.png ADDED

Git LFS Details

  • SHA256: a9da203d69dd6e788d11a5af844d9ce7fbafd6599c3e449cf897af7daa17bd0d
  • Pointer size: 132 Bytes
  • Size of remote file: 3.66 MB
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ transformers
+ torch
tests/test_smoke.py ADDED
@@ -0,0 +1,25 @@
+ import sys, os
+ sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+ import app
+
+ class Token:
+     def __init__(self, token): self.token = token
+
+ def test_api_requires_token():
+     hf_token = os.environ.get("HF_TOKEN")
+     assert hf_token, "HF_TOKEN not set in environment"
+
+     gen = app.respond(
+         message="Hi",
+         history=[],
+         system_message="test",
+         max_tokens=8,
+         temperature=0.2,
+         top_p=0.9,
+         hf_token=Token(hf_token),
+         selected_model="openai/gpt-oss-20b (api)",
+     )
+     first = next(gen)
+     assert "please log in" not in first.lower()  # shouldn't get warning
+     assert isinstance(first, str)
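To mirror the test.yml job locally, the token can be supplied through the environment before invoking pytest. A sketch (assumes pytest is installed; the token value shown is a placeholder):

import os
import pytest

os.environ.setdefault("HF_TOKEN", "hf_xxx")  # placeholder; use a real token
raise SystemExit(pytest.main(["-q", "tests/test_smoke.py"]))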