k-mktr committed on
Commit
bb06291
·
verified ·
1 Parent(s): 7c251ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -26
app.py CHANGED
@@ -10,12 +10,11 @@ from typing import Dict
10
  import json
11
  from leaderboard import (
12
  get_current_leaderboard,
13
- update_leaderboard,
14
- start_backup_thread,
15
- get_leaderboard,
16
  get_elo_leaderboard,
17
- ensure_elo_ratings_initialized,
18
- get_archived_models_list
19
  )
20
  import openai
21
  from collections import Counter
@@ -62,9 +61,7 @@ start_backup_thread()
62
 
63
  # Function to get available models (using predefined list)
64
  def get_available_models():
65
- # Filter out models that have been archived
66
- active_models = [model[0] for model in config.get_approved_models() if model[0] not in leaderboard.load_archived_models()]
67
- return active_models
68
 
69
  # Function to get recent opponents for a model
70
  recent_opponents = {}
@@ -98,8 +95,7 @@ def call_ollama_api(model, prompt):
98
  "content": prompt
99
  }
100
  ],
101
- timeout=180,
102
- max_tokens=config.MAX_TOKENS
103
  )
104
  logger.info(f"Received response for model: {model}")
105
 
@@ -578,20 +574,6 @@ with gr.Blocks(css="""
578
  interactive=True,
579
  label="ELO Leaderboard"
580
  )
581
-
582
- # Archived Models Tab
583
- with gr.Tab("Archived Models"):
584
- gr.Markdown("""
585
- ### Archived Models
586
- These models have reached the battle threshold and have been moved to the archive.
587
- """)
588
- archived_models_table = gr.Dataframe(
589
- headers=["Model", "Wins", "Losses", "Total Battles", "Win Rate"],
590
- row_count=10,
591
- col_count=5,
592
- interactive=True,
593
- label="Archived Models"
594
- )
595
 
596
  # Latest Updates Tab
597
  with gr.Tab("Latest Updates"):
@@ -644,11 +626,10 @@ with gr.Blocks(css="""
644
  # Update leaderboard on launch
645
  demo.load(get_leaderboard, outputs=leaderboard)
646
  demo.load(get_elo_leaderboard, outputs=elo_leaderboard)
647
- demo.load(get_archived_models_list, outputs=archived_models_table)
648
 
649
  if __name__ == "__main__":
650
  # Initialize ELO ratings before launching the app
651
  ensure_elo_ratings_initialized()
652
  # Start the model refresh thread
653
- # config.start_model_refresh_thread()
654
  demo.launch(show_api=False)
 
10
  import json
11
  from leaderboard import (
12
  get_current_leaderboard,
13
+ update_leaderboard,
14
+ start_backup_thread,
15
+ get_leaderboard,
16
  get_elo_leaderboard,
17
+ ensure_elo_ratings_initialized
 
18
  )
19
  import openai
20
  from collections import Counter
 
61
 
62
  # Function to get available models (using predefined list)
63
  def get_available_models():
64
+ return [model[0] for model in config.get_approved_models()]
 
 
65
 
66
  # Function to get recent opponents for a model
67
  recent_opponents = {}
 
95
  "content": prompt
96
  }
97
  ],
98
+ timeout=180
 
99
  )
100
  logger.info(f"Received response for model: {model}")
101
 
 
574
  interactive=True,
575
  label="ELO Leaderboard"
576
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
577
 
578
  # Latest Updates Tab
579
  with gr.Tab("Latest Updates"):
 
626
  # Update leaderboard on launch
627
  demo.load(get_leaderboard, outputs=leaderboard)
628
  demo.load(get_elo_leaderboard, outputs=elo_leaderboard)
 
629
 
630
  if __name__ == "__main__":
631
  # Initialize ELO ratings before launching the app
632
  ensure_elo_ratings_initialized()
633
  # Start the model refresh thread
634
+ config.start_model_refresh_thread()
635
  demo.launch(show_api=False)