Update app/.pyfun

#1
by BadTin - opened
Files changed (1) hide show
  1. app/.pyfun +41 -23
app/.pyfun CHANGED
@@ -63,7 +63,7 @@ SEARCH_TIMEOUT_SEC = "30"
63
  [LLM_PROVIDERS]
64
 
65
 
66
- # later for customs assi
67
  [LLM_PROVIDER.smollm]
68
  active = "true"
69
  base_url = "https://codey-lab-smollm2-customs.hf.space/v1"
@@ -73,6 +73,8 @@ SEARCH_TIMEOUT_SEC = "30"
73
  fallback_to = "gemini"
74
  [LLM_PROVIDER.smollm_END]
75
 
 
 
76
  [LLM_PROVIDER.anthropic]
77
  active = "true"
78
  base_url = "https://api.anthropic.com/v1"
@@ -80,7 +82,8 @@ SEARCH_TIMEOUT_SEC = "30"
80
  api_version_header = "2023-06-01"
81
  default_model = "claude-haiku-4-5-20251001"
82
  models = "claude-opus-4-6, claude-sonnet-4-6, claude-haiku-4-5-20251001"
83
- fallback_to = "gemini" # ← gemini statt openrouter!
 
84
  [LLM_PROVIDER.anthropic_END]
85
 
86
  [LLM_PROVIDER.gemini]
@@ -88,7 +91,8 @@ SEARCH_TIMEOUT_SEC = "30"
88
  base_url = "https://generativelanguage.googleapis.com/v1beta"
89
  env_key = "GEMINI_API_KEY"
90
  default_model = "gemini-2.5-flash"
91
- models = "gemini-2.0-flash, gemini-2.5-flash, gemini-2.5-flash-lite, gemini-3.1-flash-lite-preview, gemini-3-flash-preview, gemini-3.1-pro-preview, "
 
92
  fallback_to = "openrouter"
93
  [LLM_PROVIDER.gemini_END]
94
 
@@ -98,16 +102,17 @@ SEARCH_TIMEOUT_SEC = "30"
98
 env_key = "OPENROUTER_API_KEY" # → .env: OPENROUTER_API_KEY=sk-or-...
99
  default_model = "nvidia/nemotron-nano-9b-v2:free"
100
  models = "meta-llama/llama-3-8b-instruct, mistralai/mistral-7b-instruct, nvidia/nemotron-nano-9b-v2:free, google/gemma-3-27b-it:free, openai/gpt-oss-20b:free, mistralai/Mistral-7B-Instruct-v0.3, meta-llama/Llama-3.3-70B-Instruct"
101
- fallback_to = "" # last in chain, no further fallback
102
  [LLM_PROVIDER.openrouter_END]
103
 
104
  [LLM_PROVIDER.huggingface]
105
- active = "false"
106
  base_url = "https://api-inference.huggingface.co/v1"
107
 env_key = "HF_TOKEN" # → .env: HF_TOKEN=hf_...
108
- default_model = "meta-llama/Llama-3.1-8B-Instruct"
109
- models = "meta-llama/Llama-3.1-8B-Instruct, mistralai/Mistral-Nemo-Instruct-2407, Qwen/Qwen2.5-72B-Instruct"
110
- fallback_to = "" # last in chain, no further fallback
 
111
  [LLM_PROVIDER.huggingface_END]
112
 
113
  # ── Add more LLM providers below ──────────────────────────────────────────
@@ -485,23 +490,36 @@ SEARCH_TIMEOUT_SEC = "30"
485
  # timeout_sec = "30"
486
  # [TOOL.code_exec_END]
487
 
488
- # ── Shellmaster 2.0 ──────────────────────────────────────────────────────────
489
-
490
  [TOOL.shellmaster]
491
- active = "false"
492
- shellmaster_agent_url = "http://localhost:5004"
493
- description = "Generate safe shell commands for requested tasks"
494
- provider_type = "llm"
495
- default_provider = "smollm"
496
- timeout_sec = "30"
497
- shellmaster_commands_file = "shellmaster_commands.jsonl"
498
- shellmaster_commands_dataset_url = ""
499
- shellmaster_customs_model_url = ""
500
- system_prompt = "You are ShellMaster. Generate safe shell commands.
501
- ALWAYS include a backup and recovery plan.
502
- Output JSON: {command, backup, recovery, risk}"
503
  [TOOL.shellmaster_END]
504
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
505
  [TOOLS_END]
506
 
507
  # =============================================================================
@@ -522,7 +540,7 @@ DEBUG = "ON" # ON | OFF
522
  DEBUG_LEVEL = "FULL" # FULL | WARN | ERROR
523
  LOG_FILE = "hub_debug.log"
524
  LOG_REQUESTS = "true" # log every provider request
525
- LOG_RESPONSES = "false" # careful: may log sensitive data!
526
  [DEBUG_END]
527
 
528
  # =============================================================================
 
63
  [LLM_PROVIDERS]
64
 
65
 
66
+ # customs LLM
67
  [LLM_PROVIDER.smollm]
68
  active = "true"
69
  base_url = "https://codey-lab-smollm2-customs.hf.space/v1"
 
73
  fallback_to = "gemini"
74
  [LLM_PROVIDER.smollm_END]
75
 
76
+ # AI Wrapper
77
+
78
  [LLM_PROVIDER.anthropic]
79
  active = "true"
80
  base_url = "https://api.anthropic.com/v1"
 
82
  api_version_header = "2023-06-01"
83
  default_model = "claude-haiku-4-5-20251001"
84
  models = "claude-opus-4-6, claude-sonnet-4-6, claude-haiku-4-5-20251001"
85
+ supports_cache = "true"
86
+ fallback_to = "gemini"
87
  [LLM_PROVIDER.anthropic_END]
88
 
89
  [LLM_PROVIDER.gemini]
 
91
  base_url = "https://generativelanguage.googleapis.com/v1beta"
92
  env_key = "GEMINI_API_KEY"
93
  default_model = "gemini-2.5-flash"
94
+ models = "gemini-2.0-flash, gemini-2.5-flash, gemini-2.5-flash-lite, gemini-3.1-flash-lite-preview, gemini-3-flash-preview, gemini-3.1-pro-preview"
95
+ supports_cache = "true"
96
  fallback_to = "openrouter"
97
  [LLM_PROVIDER.gemini_END]
98
 
 
102
 env_key = "OPENROUTER_API_KEY" # → .env: OPENROUTER_API_KEY=sk-or-...
103
  default_model = "nvidia/nemotron-nano-9b-v2:free"
104
  models = "meta-llama/llama-3-8b-instruct, mistralai/mistral-7b-instruct, nvidia/nemotron-nano-9b-v2:free, google/gemma-3-27b-it:free, openai/gpt-oss-20b:free, mistralai/Mistral-7B-Instruct-v0.3, meta-llama/Llama-3.3-70B-Instruct"
105
+ fallback_to = "huggingface"
106
  [LLM_PROVIDER.openrouter_END]
107
 
108
  [LLM_PROVIDER.huggingface]
109
+ active = "true"
110
  base_url = "https://api-inference.huggingface.co/v1"
111
 env_key = "HF_TOKEN" # → .env: HF_TOKEN=hf_...
112
+ default_model = "Qwen/Qwen2.5-Coder-32B-Instruct"
113
+ models = "Qwen/Qwen2.5-Coder-32B-Instruct, meta-llama/Llama-3.1-8B-Instruct, mistralai/Mistral-Nemo-Instruct-2407, Qwen/Qwen2.5-72B-Instruct"
114
+
115
+ fallback_to = "smollm" # ← falls back to the customs smollm provider
116
  [LLM_PROVIDER.huggingface_END]
117
 
118
  # ── Add more LLM providers below ──────────────────────────────────────────
 
490
  # timeout_sec = "30"
491
  # [TOOL.code_exec_END]
492
 
493
+ # ── Shellmaster 2.0 ──────────────────────────────────────────────────────────
 
494
  [TOOL.shellmaster]
495
+ active = "false"
496
+ shellmaster_agent_url = "http://localhost:5004"
497
+ shellmaster_token = ""
498
+ description = "AI-powered shell assistant — generates and executes safe shell commands"
499
+ provider_type = "llm"
500
+ default_provider = "smollm"
501
+ timeout_sec = "30"
502
+ shellmaster_commands_file = "shellmaster_commands.jsonl"
503
+ shellmaster_commands_dataset = ""
504
+ shellmaster_customs_model = ""
505
+ system_prompt = "You are ShellMaster. Generate safe shell commands for the requested task. Output ONLY valid JSON: {\"command\": \"cmd here\", \"backup\": \"backup cmd or empty\", \"recovery\": \"recovery cmd or empty\", \"risk\": \"low|medium|high\"}"
 
506
  [TOOL.shellmaster_END]
507
 
508
+ [TOOL.shellmaster_confirm]
509
+ active = "false"
510
+ description = "Confirm and execute the last ShellMaster command — runs backup first"
511
+ [TOOL.shellmaster_confirm_END]
512
+
513
+ [TOOL.shellmaster_recover]
514
+ active = "false"
515
+ description = "Execute recovery plan for last ShellMaster command"
516
+ [TOOL.shellmaster_recover_END]
517
+
518
+ [TOOL.shellmaster_commands]
519
+ active = "false"
520
+ description = "List allowed ShellMaster commands from registry"
521
+ [TOOL.shellmaster_commands_END]
522
+
523
  [TOOLS_END]
524
 
525
  # =============================================================================
 
540
  DEBUG_LEVEL = "FULL" # FULL | WARN | ERROR
541
  LOG_FILE = "hub_debug.log"
542
  LOG_REQUESTS = "true" # log every provider request
543
+ LOG_RESPONSES = "true" # careful: may log sensitive data!
544
  [DEBUG_END]
545
 
546
  # =============================================================================