Charles Grandjean committed on
Commit
9a9d495
·
1 Parent(s): 27d80a8

fix and add secrets

Browse files
Files changed (3) hide show
  1. add_secrets.ipynb +1 -1
  2. agent_api.py +42 -8
  3. langraph_agent.py +9 -3
add_secrets.ipynb CHANGED
@@ -40,7 +40,7 @@
40
  "name": "stdout",
41
  "output_type": "stream",
42
  "text": [
43
- "Uploaded 77 secrets to Cyberlgl/CyberLegalAIendpoint.\n"
44
  ]
45
  }
46
  ],
 
40
  "name": "stdout",
41
  "output_type": "stream",
42
  "text": [
43
+ "Uploaded 80 secrets to Cyberlgl/CyberLegalAIendpoint.\n"
44
  ]
45
  }
46
  ],
agent_api.py CHANGED
@@ -27,6 +27,7 @@ from pdf_analyzer import PDFAnalyzerAgent
27
  from langchain_openai import ChatOpenAI
28
  from mistralai import Mistral
29
  import logging
 
30
  import base64
31
  import tempfile
32
  import os as pathlib
@@ -153,15 +154,14 @@ class CyberLegalAPI:
153
  self.agent_lawyer = CyberLegalAgent(llm=llm, system_prompt=SYSTEM_PROMPT_LAWYER, tools=tools.tools_for_lawyer)
154
  self.pdf_analyzer = PDFAnalyzerAgent(llm=llm, mistral_client=mistral_client)
155
  self.conversation_manager = ConversationManager()
156
- self.base_lawyer_prompt = SYSTEM_PROMPT_LAWYER
157
  logger.info(f"🔧 CyberLegalAPI initialized with {llm_provider.upper()} provider")
158
 
159
  def _build_lawyer_prompt(self, document_analyses: Optional[List[DocumentAnalysis]], jurisdiction: str) -> str:
160
  """Build lawyer prompt with optional document context"""
161
  if not document_analyses:
162
- return self.base_lawyer_prompt.format(jurisdiction=jurisdiction)
163
 
164
- docs_text = "\n\n### Available Document Analyses in Your Workspace\n"
165
  for i, doc in enumerate(document_analyses, 1):
166
  docs_text += f"[Doc {i}] {doc.file_name}\n"
167
  if doc.summary: docs_text += f"Summary: {doc.summary}\n"
@@ -169,7 +169,8 @@ class CyberLegalAPI:
169
  if doc.key_details: docs_text += f"Key Details: {doc.key_details}\n"
170
  docs_text += "\n"
171
 
172
- return self.base_lawyer_prompt.format(jurisdiction=jurisdiction) + docs_text + "Consider these documents when relevant.\n"
 
173
 
174
  async def process_request(self, request: ChatRequest) -> ChatResponse:
175
  """
@@ -199,14 +200,23 @@ class CyberLegalAPI:
199
  "content": msg.content
200
  })
201
 
 
 
 
202
  try:
203
  # Build dynamic system prompt for lawyers with document analyses
204
- system_prompt = None
205
  if request.userType == "lawyer" and request.documentAnalyses:
206
  system_prompt = self._build_lawyer_prompt(request.documentAnalyses, request.jurisdiction)
207
  logger.info(f"📚 Using lawyer prompt with {len(request.documentAnalyses)} document analyses")
 
 
 
 
 
 
208
 
209
  # Process through selected agent with raw message and conversation history
 
210
  result = await agent.process_query(
211
  user_query=request.message,
212
  conversation_history=conversation_history,
@@ -214,6 +224,8 @@ class CyberLegalAPI:
214
  system_prompt=system_prompt
215
  )
216
 
 
 
217
  # Create response
218
  response = ChatResponse(
219
  response=result["response"],
@@ -223,12 +235,22 @@ class CyberLegalAPI:
223
  error=result.get("error")
224
  )
225
 
 
226
  return response
227
 
228
  except Exception as e:
 
 
 
 
229
  raise HTTPException(
230
  status_code=500,
231
- detail=f"Processing failed: {str(e)}"
 
 
 
 
 
232
  )
233
 
234
  async def health_check(self) -> HealthResponse:
@@ -300,10 +322,17 @@ class CyberLegalAPI:
300
  logger.debug(f"🗑️ Cleaned up temporary file: {tmp_file_path}")
301
 
302
  except Exception as e:
 
303
  logger.error(f"❌ PDF analysis failed: {str(e)}")
 
304
  raise HTTPException(
305
  status_code=500,
306
- detail=f"PDF analysis failed: {str(e)}"
 
 
 
 
 
307
  )
308
 
309
  # Initialize API instance
@@ -405,13 +434,18 @@ async def root():
405
  @app.exception_handler(Exception)
406
  async def global_exception_handler(request, exc):
407
  """
408
- Global exception handler
409
  """
 
 
 
 
410
  return JSONResponse(
411
  status_code=500,
412
  content={
413
  "error": "Internal server error",
414
  "detail": str(exc),
 
415
  "timestamp": datetime.now().isoformat()
416
  }
417
  )
 
27
  from langchain_openai import ChatOpenAI
28
  from mistralai import Mistral
29
  import logging
30
+ import traceback
31
  import base64
32
  import tempfile
33
  import os as pathlib
 
154
  self.agent_lawyer = CyberLegalAgent(llm=llm, system_prompt=SYSTEM_PROMPT_LAWYER, tools=tools.tools_for_lawyer)
155
  self.pdf_analyzer = PDFAnalyzerAgent(llm=llm, mistral_client=mistral_client)
156
  self.conversation_manager = ConversationManager()
 
157
  logger.info(f"🔧 CyberLegalAPI initialized with {llm_provider.upper()} provider")
158
 
159
  def _build_lawyer_prompt(self, document_analyses: Optional[List[DocumentAnalysis]], jurisdiction: str) -> str:
160
  """Build lawyer prompt with optional document context"""
161
  if not document_analyses:
162
+ return SYSTEM_PROMPT_LAWYER.format(jurisdiction=jurisdiction)
163
 
164
+ docs_text = "\n\n### Documents parsed in the lawyer profile\n"
165
  for i, doc in enumerate(document_analyses, 1):
166
  docs_text += f"[Doc {i}] {doc.file_name}\n"
167
  if doc.summary: docs_text += f"Summary: {doc.summary}\n"
 
169
  if doc.key_details: docs_text += f"Key Details: {doc.key_details}\n"
170
  docs_text += "\n"
171
 
172
+ docs_text += "Use these documents if the user's question is related to their content.\n"
173
+ return SYSTEM_PROMPT_LAWYER.format(jurisdiction=jurisdiction) + docs_text
174
 
175
  async def process_request(self, request: ChatRequest) -> ChatResponse:
176
  """
 
200
  "content": msg.content
201
  })
202
 
203
+ logger.info(f"🚀 Starting request processing - user_type: {request.userType}, jurisdiction: {request.jurisdiction}")
204
+ logger.info(f"💬 User query: {request.message}")
205
+
206
  try:
207
  # Build dynamic system prompt for lawyers with document analyses
 
208
  if request.userType == "lawyer" and request.documentAnalyses:
209
  system_prompt = self._build_lawyer_prompt(request.documentAnalyses, request.jurisdiction)
210
  logger.info(f"📚 Using lawyer prompt with {len(request.documentAnalyses)} document analyses")
211
+ elif request.userType == "lawyer":
212
+ system_prompt = SYSTEM_PROMPT_LAWYER.format(jurisdiction=request.jurisdiction)
213
+ logger.info(f"📝 Using default lawyer prompt with jurisdiction: {request.jurisdiction}")
214
+ else:
215
+ system_prompt = SYSTEM_PROMPT_CLIENT.format(jurisdiction=request.jurisdiction)
216
+ logger.info(f"👤 Using client prompt with jurisdiction: {request.jurisdiction}")
217
 
218
  # Process through selected agent with raw message and conversation history
219
+ logger.info(f"🤖 Calling agent.process_query with jurisdiction: {request.jurisdiction}")
220
  result = await agent.process_query(
221
  user_query=request.message,
222
  conversation_history=conversation_history,
 
224
  system_prompt=system_prompt
225
  )
226
 
227
+ logger.info(f"✅ Agent processing completed successfully")
228
+
229
  # Create response
230
  response = ChatResponse(
231
  response=result["response"],
 
235
  error=result.get("error")
236
  )
237
 
238
+ logger.info(f"📤 Returning response to user")
239
  return response
240
 
241
  except Exception as e:
242
+ # Log full traceback for debugging
243
+ error_traceback = traceback.format_exc()
244
+ logger.error(f"❌ Request processing failed: {str(e)}")
245
+ logger.error(f"🔍 Full traceback:\n{error_traceback}")
246
  raise HTTPException(
247
  status_code=500,
248
+ detail={
249
+ "error": "Processing failed",
250
+ "message": str(e),
251
+ "traceback": error_traceback,
252
+ "timestamp": datetime.now().isoformat()
253
+ }
254
  )
255
 
256
  async def health_check(self) -> HealthResponse:
 
322
  logger.debug(f"🗑️ Cleaned up temporary file: {tmp_file_path}")
323
 
324
  except Exception as e:
325
+ error_traceback = traceback.format_exc()
326
  logger.error(f"❌ PDF analysis failed: {str(e)}")
327
+ logger.error(f"🔍 Full traceback:\n{error_traceback}")
328
  raise HTTPException(
329
  status_code=500,
330
+ detail={
331
+ "error": "PDF analysis failed",
332
+ "message": str(e),
333
+ "traceback": error_traceback,
334
+ "timestamp": datetime.now().isoformat()
335
+ }
336
  )
337
 
338
  # Initialize API instance
 
434
  @app.exception_handler(Exception)
435
  async def global_exception_handler(request, exc):
436
  """
437
+ Global exception handler with full traceback for debugging
438
  """
439
+ error_traceback = traceback.format_exc()
440
+ logger.error(f"❌ Unhandled exception: {str(exc)}")
441
+ logger.error(f"🔍 Full traceback:\n{error_traceback}")
442
+
443
  return JSONResponse(
444
  status_code=500,
445
  content={
446
  "error": "Internal server error",
447
  "detail": str(exc),
448
+ "traceback": error_traceback,
449
  "timestamp": datetime.now().isoformat()
450
  }
451
  )
langraph_agent.py CHANGED
@@ -5,6 +5,7 @@ Agent can call tools, process results, and decide to continue or answer
5
  """
6
 
7
  import os
 
8
  import logging
9
  from typing import Dict, Any, List, Optional
10
  from datetime import datetime
@@ -23,7 +24,6 @@ from tools import tools, tools_for_client, tools_for_lawyer
23
  class CyberLegalAgent:
24
  def __init__(self, llm, system_prompt: str = SYSTEM_PROMPT_CLIENT, tools: List[Any] = tools):
25
  self.tools = tools
26
- self.system_prompt = system_prompt
27
  self.llm = llm
28
  self.performance_monitor = PerformanceMonitor()
29
  self.llm_with_tools = self.llm.bind_tools(self.tools)
@@ -54,8 +54,14 @@ class CyberLegalAgent:
54
 
55
  if not intermediate_steps:
56
  history = state.get("conversation_history", [])
57
- # Use provided system prompt if available, otherwise use the default
58
- system_prompt_to_use = state.get("system_prompt", self.system_prompt)
 
 
 
 
 
 
59
  intermediate_steps.append(SystemMessage(content=system_prompt_to_use))
60
  for msg in history:
61
  if isinstance(msg, dict):
 
5
  """
6
 
7
  import os
8
+ import copy
9
  import logging
10
  from typing import Dict, Any, List, Optional
11
  from datetime import datetime
 
24
  class CyberLegalAgent:
25
  def __init__(self, llm, system_prompt: str = SYSTEM_PROMPT_CLIENT, tools: List[Any] = tools):
26
  self.tools = tools
 
27
  self.llm = llm
28
  self.performance_monitor = PerformanceMonitor()
29
  self.llm_with_tools = self.llm.bind_tools(self.tools)
 
54
 
55
  if not intermediate_steps:
56
  history = state.get("conversation_history", [])
57
+ # Use provided system prompt if available (not None), otherwise use the default
58
+ system_prompt_to_use = state.get("system_prompt")
59
+ jurisdiction = state.get("jurisdiction", "unknown")
60
+ # Deepcopy to avoid modifying the original prompt string
61
+ system_prompt_to_use = copy.deepcopy(system_prompt_to_use)
62
+ system_prompt_to_use = system_prompt_to_use.format(jurisdiction=jurisdiction)
63
+ logger.info(f"📍 Formatted system prompt with jurisdiction: {jurisdiction}")
64
+
65
  intermediate_steps.append(SystemMessage(content=system_prompt_to_use))
66
  for msg in history:
67
  if isinstance(msg, dict):