Chryslerx10 commited on
Commit
a66797c
·
1 Parent(s): 7abbfc2
Files changed (2) hide show
  1. app.py +2 -4
  2. medrax/agent/agent.py +2 -2
app.py CHANGED
@@ -76,7 +76,6 @@ def initialize_agent(
76
 
77
  checkpointer = MemorySaver()
78
  model = ChatOpenAI(model=model, temperature=temperature, top_p=top_p, **openai_kwargs)
79
- print(model.invoke("Hey how are you?"))
80
  agent = ChatAgent(
81
  model,
82
  tools=list(tools_dict.values()),
@@ -104,8 +103,8 @@ if __name__ == "__main__":
104
  "DicomProcessorTool",
105
  "ChestXRayClassifierTool",
106
  "ChestXRaySegmentationTool",
107
- # "ChestXRayReportGeneratorTool",
108
- # "XRayVQATool",
109
  # "LlavaMedTool",
110
  # "XRayPhraseGroundingTool",
111
  # "ChestXRayGeneratorTool",
@@ -119,7 +118,6 @@ if __name__ == "__main__":
119
  if base_url := os.getenv("OPENAI_BASE_URL"):
120
  openai_kwargs["base_url"] = base_url
121
 
122
- print(f"OPENAI arguments - {openai_kwargs}")
123
  agent, tools_dict = initialize_agent(
124
  "medrax/docs/system_prompts.txt",
125
  tools_to_use=selected_tools,
 
76
 
77
  checkpointer = MemorySaver()
78
  model = ChatOpenAI(model=model, temperature=temperature, top_p=top_p, **openai_kwargs)
 
79
  agent = ChatAgent(
80
  model,
81
  tools=list(tools_dict.values()),
 
103
  "DicomProcessorTool",
104
  "ChestXRayClassifierTool",
105
  "ChestXRaySegmentationTool",
106
+ "ChestXRayReportGeneratorTool",
107
+ "XRayVQATool",
108
  # "LlavaMedTool",
109
  # "XRayPhraseGroundingTool",
110
  # "ChestXRayGeneratorTool",
 
118
  if base_url := os.getenv("OPENAI_BASE_URL"):
119
  openai_kwargs["base_url"] = base_url
120
 
 
121
  agent, tools_dict = initialize_agent(
122
  "medrax/docs/system_prompts.txt",
123
  tools_to_use=selected_tools,
medrax/agent/agent.py CHANGED
@@ -117,11 +117,11 @@ class ChatAgent:
117
  Dict[str, List[AnyMessage]]: A dictionary containing the model's response.
118
  """
119
  messages = state["messages"]
120
- print('process_request input', state)
121
  if self.prompts:
122
  messages = [SystemMessage(content=self.prompts["MEDICAL_ASSISTANT"])] + messages
123
  response = self.model.invoke(messages)
124
- print('process_request output', response)
125
  return {"messages": [response]}
126
 
127
  def has_tool_calls(self, state: AgentState) -> bool:
 
117
  Dict[str, List[AnyMessage]]: A dictionary containing the model's response.
118
  """
119
  messages = state["messages"]
120
+ # print('process_request input', state)
121
  if self.prompts:
122
  messages = [SystemMessage(content=self.prompts["MEDICAL_ASSISTANT"])] + messages
123
  response = self.model.invoke(messages)
124
+ # print('process_request output', response)
125
  return {"messages": [response]}
126
 
127
  def has_tool_calls(self, state: AgentState) -> bool: