import gradio as gr
import asyncio
import os
import uuid
import json
import requests
import httpx
import fitz  # PyMuPDF
import openpyxl # openpyxl for creating Excel reports
import docx
import pytesseract
import cv2
import numpy as np
from PIL import Image
from openpyxl.styles import Alignment
from typing import List, Dict, Any, Optional
from dotenv import load_dotenv
from fastmcp import Client
from fastmcp.client.transports import StdioTransport
from pathlib import Path

# OpenHands POC imports (optional - gracefully handle if not installed)
try:
    from openhands_poc.executor import get_executor
    OPENHANDS_AVAILABLE = True
except ImportError:
    print("OpenHands POC not available. Install with: pip install openhands-ai")
    OPENHANDS_AVAILABLE = False
    get_executor = None


def get_file_path(file_obj) -> Optional[str]:
    """
    Extract file path from Gradio file object.
    Handles different Gradio versions (3.x, 4.x, 5.x).
    """
    if file_obj is None:
        return None

    # Gradio 5.x: file_obj is often a string path directly
    if isinstance(file_obj, str):
        return file_obj

    # Gradio 5.x namespace object: path is in .value attribute
    if hasattr(file_obj, 'value'):
        value = file_obj.value
        if isinstance(value, str) and len(value) > 5:
            return value

    # Gradio 3.x/4.x: object with .name attribute containing the path
    if hasattr(file_obj, 'name'):
        name_val = file_obj.name
        if isinstance(name_val, str) and len(name_val) > 5 and '/' in name_val:
            return name_val

    # Try .path attribute
    if hasattr(file_obj, 'path'):
        path_val = file_obj.path
        if isinstance(path_val, str) and len(path_val) > 5:
            return path_val

    # Nothing usable was found: fail with a descriptive error
    raise ValueError(f"Cannot extract file path from {type(file_obj)}: {file_obj}")
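# Usage sketch (hypothetical values, not from the original code):
#   get_file_path("/tmp/gradio/abc/schedule.pdf")                  -> "/tmp/gradio/abc/schedule.pdf"
#   get_file_path(types.SimpleNamespace(name="/tmp/x.pdf"))        -> "/tmp/x.pdf"
#   get_file_path(None)                                            -> None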


# Load environment variables
load_dotenv()

# --- Agent Tool Specifications ---

EVERYTHING_SEARCH_TOOL_SPEC = [
    {
        "type": "function",
        "function": {
            "name": "search",
            "description": "Search for files and folders on the local system using the Everything search engine.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "The search query string. Supports Everything search syntax (e.g., '*.py', 'ext:log', 'datemodified:today')."
                    },
                    "max_results": {
                        "type": "integer",
                        "description": "Maximum number of results to return. Default is 100."
                    },
                    "sort_by": {
                        "type": "integer",
                        "description": "Sort order for results. For example, 14 sorts by modification date (newest first). Default is 1 (filename A-Z)."
                    }
                },
                "required": ["query"],
            },
        },
    }
]
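
# Illustrative tool call the model may emit against this spec (per the OpenAI
# tool-calling format, "arguments" arrives as a JSON-encoded string):
#   {"function": {"name": "search",
#                 "arguments": "{\"query\": \"ext:pdf datemodified:today\", \"max_results\": 50}"}}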


MCP_SERVER_URL = "http://localhost:8017/mcp"

AZURE_KEY = os.getenv("AZURE_OPENAI_API_KEY")
AZURE_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")  # e.g., "https://<your-resource-name>.openai.azure.com/"
DEPLOYMENT_NAME = os.getenv("AZURE_DEPLOYMENT_NAME")  # e.g., "gpt-5-mini"
DEFAULT_TIMEOUT = 300
# os.environ["EVERYTHING_SDK_DLL"] = os.getenv("EVERYTHING_SDK_DLL")

# Verify keys
if not AZURE_KEY or not AZURE_ENDPOINT or not DEPLOYMENT_NAME:
    raise RuntimeError("Make sure AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, and AZURE_DEPLOYMENT_NAME are set in .env")

# Persistent requests session
SESSION = requests.Session()


def chat_with_functions(messages: List[Dict[str, Any]], tools: List[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Send messages to Azure Foundry model for text completion."""
    url = f"{AZURE_ENDPOINT}openai/deployments/{DEPLOYMENT_NAME}/chat/completions?api-version=2024-02-01"

    payload = {
        "messages": messages,
        "max_completion_tokens": 8192,
    }
    
    if tools:
        payload["tools"] = tools
        payload["tool_choice"] = "auto"

    headers = {
        "api-key": AZURE_KEY,
        "Content-Type": "application/json"
    }

    try:
        response = SESSION.post(url, headers=headers, json=payload, timeout=DEFAULT_TIMEOUT)
        response.raise_for_status()
    except requests.exceptions.HTTPError as errh:
        raise RuntimeError(f"HTTP Error: {errh} - {response.text}")
    except requests.exceptions.RequestException as err:
        raise RuntimeError(f"Request failed: {err}")

    try:
        return response.json()
    except json.JSONDecodeError:
        raise RuntimeError(f"Failed to parse JSON response: {response.text}")





# --- Tesseract OCR Configuration ---
# The line below is only needed if Tesseract is not on your system's PATH.
# Update it to point at your Tesseract executable (default Windows install shown).
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
# -----------------------------------


def extract_schedule_section(full_text: str, classification: str) -> str:
    """
    Isolates sections mentioning the classification keyword using a context window approach.
    """
    print("--- Isolating appliance classification sections using context windows ---")
    lines = full_text.splitlines()
    if not lines:
        return ""

    # Define the context window size (lines before and after the keyword)
    LINES_BEFORE = 0
    LINES_AFTER = 200

    # 1. Find all lines containing the classification keyword
    target_indices = [
        i for i, line in enumerate(lines) if classification.lower() in line.lower()
    ]

    if not target_indices:
        print("--- Warning: Keyword appliance classification not found. Using full text. ---")
        return full_text

    # 2. Create a set of all line numbers to include (handles overlaps automatically)
    lines_to_include = set()
    for index in target_indices:
        start = max(0, index - LINES_BEFORE)
        end = min(len(lines), index + LINES_AFTER + 1)
        for i in range(start, end):
            lines_to_include.add(i)

    # 3. Build the result from the sorted line numbers
    result_lines = [lines[i] for i in sorted(lines_to_include)]
    
    result_text = "\n".join(result_lines)
    print(result_text)
    print(f"--- Successfully isolated appliance classification sections. New length: {len(result_text)} characters ---")
    return result_text
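
# Example (illustrative): with classification "boiler", every line containing "boiler"
# anchors a window of itself plus the next 200 lines; overlapping windows are merged
# via the set, so a long schedule typically shrinks to just the boiler-related block(s).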



def create_comparison_report(comparison_data: str, output_filepath: str) -> str:
    """
    Creates an Excel report by parsing a Markdown table and writing it to cells.
    Uses simple cell-by-cell parsing: skip \n and separator cells, write everything else.
    """
    try:
        workbook = openpyxl.Workbook()
        sheet = workbook.active
        sheet.title = "Comparison Report"

        # Replace literal \n with actual newlines if present
        if '\\n' in comparison_data:
            comparison_data = comparison_data.replace('\\n', '\n')

        lines = comparison_data.strip().splitlines()

        # Clean up Rich console formatting artifacts
        cleaned_lines = []
        for line in lines:
            # Remove box-drawing characters used by Rich
            line = line.replace('│', '').replace('─', '').replace('┌', '').replace('┐', '')
            line = line.replace('└', '').replace('┘', '').replace('├', '').replace('┤', '')
            line = line.strip()
            if line:
                cleaned_lines.append(line)

        excel_row = 1

        for line in cleaned_lines:
            # Skip empty lines
            if not line.strip():
                continue

            # Only process lines that look like table rows (start with |)
            if not line.startswith('|'):
                continue

            # Split by | and clean up
            cells = [cell.strip() for cell in line.split('|')]

            # Remove empty strings from leading/trailing '|'
            if len(cells) > 0 and cells[0] == '':
                cells.pop(0)
            if len(cells) > 0 and cells[-1] == '':
                cells.pop(-1)

            # Check if this is a separator line (all cells start with - or :)
            is_separator = all(cell.startswith('-') or cell.startswith(':') for cell in cells if cell)

            if is_separator:
                # Insert a blank row after the header for readability
                print(f"Separator detected, inserting blank row at {excel_row}")
                excel_row += 1
                continue

            # Filter out cells that are just newlines or separator markers
            valid_cells = []
            for cell in cells:
                # Skip cells that are empty or a literal \n
                if not cell or cell == '\\n':
                    continue
                # Skip pure separator markers (---, :---, ---:) without dropping real
                # data that merely starts with '-' or ':' (e.g. negative numbers)
                if set(cell) <= set('-: '):
                    continue
                valid_cells.append(cell)

            # Only write rows with valid content
            if valid_cells:
                print(f"Writing row {excel_row} with {len(valid_cells)} cells: {valid_cells[0][:40]}...")
                for col_idx, cell_data in enumerate(valid_cells):
                    sheet.cell(row=excel_row, column=col_idx + 1, value=cell_data)
                excel_row += 1

        print(f"Total rows written to Excel: {excel_row - 1}")
        
        # Enable text wrapping for all populated cells
        for row in sheet.iter_rows():
            for cell in row:
                cell.alignment = Alignment(wrap_text=True, vertical='top')

        # Auto-adjust column widths with a max cap
        for col in sheet.columns:
            max_length = 0
            column = col[0].column_letter  # Get the column name
            for cell in col:
                if cell.value is not None:
                    max_length = max(max_length, len(str(cell.value)))
            # Add padding and cap the width at 45
            adjusted_width = min(max_length + 2, 45)
            sheet.column_dimensions[column].width = adjusted_width

        # Add fixed reminders below the table
        current_row = sheet.max_row + 2  # Two rows below the last row of the table
        sheet.cell(row=current_row, column=1, value="Reminders:")
        sheet.cell(row=current_row, column=1).alignment = Alignment(wrap_text=True, vertical='top')
        current_row += 1

        reminders = [
            "1) Here you can add reminders for the Engineer checking the document",
            "2) Or just motivational phrases for your Consultants: You got this!!",
        ]

        for reminder in reminders:
            sheet.cell(row=current_row, column=1, value=reminder)
            sheet.cell(row=current_row, column=1).alignment = Alignment(wrap_text=True, vertical='top')
            current_row += 1

        # Save the workbook
        workbook.save(output_filepath)
        return f"Comparison report successfully created at: {output_filepath}"
    except Exception as e:
        # Add more context to the error
        error_message = f"Error creating Excel report. The AI may have returned data in an unexpected format. Raw data was:\n---\n{comparison_data}\n---\nError: {e}"
        raise RuntimeError(error_message)
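
# Shape of input the parser expects (illustrative Markdown from the expert agent):
#   | Parameter | Schedule | Drawing | Spec   | Check | Explanation |
#   |-----------|----------|---------|--------|-------|-------------|
#   | Model No. | B-2000   | B-2000  | B-2000 | ✓     | All match   |
# The separator row becomes a blank Excel row; every other row is written cell by cell.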


def expert_agent(schedule_text: str, drawing_text: str, spec_text: str, classification: str) -> str:
    """
    Calls the Azure OpenAI model with the combined text from three documents
    and asks it to generate a comparison table.
    """
    print("--- Calling AI to generate comparison table ---")
    
    messages = [
        {
            "role": "system",
            "content": f"You are a '{classification} Shop Drawing Expert' assistant. You will be given the extracted text from three documents: a {classification} schedule, a shop drawing, and a spec sheet. "
                       "Your task is to meticulously compare the information from these sources. "
                       "Identify key parameters (e.g., model number, capacity, pressure, dimensions, power requirements, etc.) and present your findings in a clear Markdown table. "
                       "The table should contain 6 columns for every row, the parameter, the schedule's data, drawing data, spec sheet data (set any cell as N/A if data not found), "
                       "a column with a check or x depending on the result of the comparison, and an explanation of the comparison."
                       "Note that the following abbreviations are used: W.P.D.: water pressure drop, E.F.T.: entering water temperature, L.F.T.: leaving water temperature."
                       "Do not offer further work or next steps."
        },
        {
            "role": "user",
            "content": (
                "Please compare the following documents:\n\n"
                f"--- {classification} SCHEDULE DATA ---\n"
                f"{schedule_text}\n\n"
                "--- SHOP DRAWING DATA ---\n"
                f"{drawing_text}\n\n"
                f"--- {classification} SPEC SHEET DATA ---\n"
                f"{spec_text}\n"
            )
        }
    ]

    # No functions are needed for this call. The agent's job is just to return text.
    response = chat_with_functions(messages, tools=None)

    if not response.get("choices"):
        raise RuntimeError(f"Invalid response from API: {response}")

    final_content = response["choices"][0].get("message", {}).get("content", "No content returned from AI.")
    print(f"--- AI returned comparison content ---")
    return final_content


# The agent no longer needs to know how to read files or create reports.
# This will be handled by the main application logic.
FUNCTIONS_SPECS = []


def _get_file_snippet(file_path: str, file_label: str) -> str:
    """
    Extracts the first ~2000 characters of text from a file for quick classification.
    This is a lighter version of _process_file.
    """
    print(f"--- Getting snippet for {file_label}: {file_path} ---")
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"File not found at: {file_path}")

    file_extension = os.path.splitext(file_path)[1].lower()
    text = ""
    try:
        if file_extension == '.pdf':
            with fitz.open(file_path) as doc:
                # Extract text from the first 2 pages for a snippet
                for page_num, page in enumerate(doc):
                    if page_num >= 2:
                        break
                    text += page.get_text()
        
        elif file_extension == '.docx':
            doc = docx.Document(file_path)
            for i, para in enumerate(doc.paragraphs):
                if i >= 15: # Approx first 15 paragraphs
                    break
                text += para.text + "\n"

        elif file_extension == '.png':
            with Image.open(file_path) as img:
                # For images, we do a quick OCR without heavy pre-processing
                text = pytesseract.image_to_string(img, timeout=30) # 30s timeout for quick OCR

        else:
            # For other text-based files, read the first 2000 chars
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                text = f.read(2000)
        
        return text[:2000] # Truncate to 2000 characters
    except Exception as e:
        print(f"--- Warning: Could not get snippet for {file_label}. Error: {e} ---")
        return "" # Return empty string on error


def run_receptionist_agent(schedule_file: Any, drawing_file: Any, spec_file: Any) -> str:
    """
    Analyzes snippets of the three files to classify the job type.
    Returns a single keyword: one of the valid appliance categories, 'mismatched', or 'unsupported'.
    """
    print("--- Receptionist Agent: Classifying files ---")

    # 1. Get snippets from each file
    schedule_snippet = _get_file_snippet(get_file_path(schedule_file), "Appliance Schedule")
    drawing_snippet = _get_file_snippet(get_file_path(drawing_file), "Shop Drawing")
    spec_snippet = _get_file_snippet(get_file_path(spec_file), "Spec Sheet")

    # 2. Check for empty snippets which indicate processing errors
    if not all([schedule_snippet, drawing_snippet, spec_snippet]):
        print("--- Receptionist found an issue reading one or more files. ---")
        return "unsupported" # Treat as unsupported if a file can't be read

    # 3. Construct the prompt for the classification agent
    messages = [
        {
            "role": "system",
            "content": (
                "You are a receptionist agent for a mechanical engineering firm. Your job is to look at the initial text from up to three uploaded documents: a schedule, a shop drawing, and a spec sheet. "
                "Your task is to determine if these documents are all related to a certain kind of appliance. "
                "If all documents fit into one of the valid categories, return just the category as an answer. If documents are from different "
                "appliances, return 'mismatched', otherwise return 'unsupported'. Here are the valid categories: fire stopping, piping, waste and overflow, fixture, pump, sump pits, "
                "catch basins, drain, coupling, hydrants, copper system, medical gas, "
                "boiler, cooling towers, grilles, registers, diffusers, water treatment, "
                "tank, joint, valve, air unit, silencer, fan, drive, starter, vent,"
                "heat exchanger, louver, damper, air conditioner, air separator, fan coil,"
                "air ionization, heater, chiller, alarm, fitting, riser, trench heaters,"
                "ventilator, automation, BAS, MUA, lighting, generator, switch, fire stop,"
                "breaker, load center, elevator control, panelboard, switchboard,"
                "electrical panel, cable, conduit, TMGB, terminal ground bar, clamp,"
                "fiberrunner, box, boite, connecteur, connector, coupling , strap, clip,"
                "nut, rod, bracket, cantruss, alarm, smoke detector, composite sheet,"
                "fire barrier, snappro, fire protection, sprinkler, lighting, light fixture,"
                "installation, glass, window, glazing, hardware, frame, door, accessories,"
                "headwall, equipment, bike rack, wire guard, architectural grill, fireplace,"
                "chute, brick."
            )
        },
        {
            "role": "user",
            "content": (
                "Please classify the following document set:\n\n"
                "--- SCHEDULE SNIPPET ---\n"
                f"{schedule_snippet}\n\n"
                "--- SHOP DRAWING SNIPPET ---\n"
                f"{drawing_snippet}\n\n"
                "--- SPEC SHEET SNIPPET ---\n"
                f"{spec_snippet}\n"
            )
        }
    ]

    # 4. Call the LLM
    try:
        response = chat_with_functions(messages, tools=None)
        if not response.get("choices"):
            raise RuntimeError(f"Invalid response from API: {response}")
        
        classification = response["choices"][0].get("message", {}).get("content", "").strip().lower()
        
        # Strip any quotes/backticks/trailing period the model may add around the keyword
        result = classification.strip("'\"`. ")
            
        print(f"--- Receptionist classification: {result} ---")
        return result

    except Exception as e:
        print(f"--- Error during receptionist classification: {e} ---")
        return "unsupported" # Default to unsupported on error


async def handle_user_openhands(schedule_file: gr.File, drawing_file: gr.File, spec_file: gr.File) -> list:
    """
    OpenHands SDK-based document processing workflow.
    Uses ReceptionistAgentSDK → ExpertAgentSDK sequential coordination via executor.
    Returns: [report_file, metadata_md, schedule_clear, drawing_clear, spec_clear, log_content]
    """
    if not schedule_file or not drawing_file or not spec_file:
        return [None, "Please upload all three files.", None, None, None, ""]

    if not OPENHANDS_AVAILABLE:
        return [None, "OpenHands is not available. Using legacy implementation.", None, None, None, ""]

    try:
        # Use executor with SDK agents (ReceptionistAgentSDK → ExpertAgentSDK)
        from openhands_poc.executor import get_executor, is_sdk_available

        # Check if SDK is available
        if not is_sdk_available():
            # Fall back to legacy OpenHands v0.62.0
            print("=== Using legacy agents ===")
            from openhands_poc.agents import ReceptionistAgent
            receptionist = ReceptionistAgent()
            classification, comparison_table, metadata = await receptionist.classify_and_compare_async(
                get_file_path(schedule_file),
                get_file_path(drawing_file),
                get_file_path(spec_file),
            )
        else:
            # Use SDK agents via executor (run sync method in executor)
            print("=== Using SDK agents via executor ===")
            executor = get_executor()
            loop = asyncio.get_running_loop()

            # Debug: Log file object details
            print(f"=== schedule_file type: {type(schedule_file)}, repr: {repr(schedule_file)} ===")
            print(f"=== drawing_file type: {type(drawing_file)}, repr: {repr(drawing_file)} ===")
            print(f"=== spec_file type: {type(spec_file)}, repr: {repr(spec_file)} ===")

            # Extract paths using robust helper function
            schedule_path = get_file_path(schedule_file)
            drawing_path = get_file_path(drawing_file)
            spec_path = get_file_path(spec_file)

            print(f"=== Extracted paths: schedule={schedule_path}, drawing={drawing_path}, spec={spec_path} ===")

            # Wrap in lambda to ensure proper argument binding (SDK pattern)
            comparison_table, metadata = await loop.run_in_executor(
                None,
                lambda: executor.classify_and_compare_with_sdk(
                    schedule_path,
                    drawing_path,
                    spec_path,
                )
            )
            print(f"=== Executor returned ===")
            print(f"=== comparison_table length: {len(comparison_table) if comparison_table else 0} ===")
            print(f"=== metadata keys: {list(metadata.keys())} ===")
            print(f"=== metadata: {metadata} ===")
            classification = metadata.get('classification', 'unknown')
            print(f"=== classification: {classification} ===")

        # Read log file if available (check both top-level and expert_metadata)
        print("=== Reading log file ===")
        log_content = ""
        log_file = metadata.get('log_file', '')
        if not log_file and 'expert_metadata' in metadata:
            log_file = metadata['expert_metadata'].get('log_file', '')

        if log_file and os.path.exists(log_file):
            try:
                with open(log_file, 'r', encoding='utf-8') as f:
                    log_content = f.read()
            except Exception as e:
                log_content = f"Error reading log file: {str(e)}"

        # Check for errors
        print(f"=== Checking for errors, comparison_table empty: {not comparison_table} ===")
        if not comparison_table:
            print(f"=== No comparison table, classification: {classification} ===")
            if classification == "unsupported":
                error_message = "I do not have an expert for this type of equipment, or there was an error reading the files."
            elif classification == "mismatched":
                error_message = "The uploaded files appear to be mismatched. Please ensure all three documents are for the same piece of equipment."
            else:
                error_message = f"Failed to generate comparison. Error: {metadata.get('error', metadata.get('expert_error', 'Unknown error'))}"

            # Include log file link
            if log_file:
                error_message += f"\n\nLog file: {log_file}"

            print(f"=== Returning error: {error_message} ===")
            return [None, error_message, None, None, None, log_content]

        print("=== comparison_table exists, continuing to create report ===")

        # Store original filenames
        schedule_filename = os.path.basename(get_file_path(schedule_file))
        drawing_filename = os.path.basename(get_file_path(drawing_file))
        spec_filename = os.path.basename(get_file_path(spec_file))

        # Create Excel report from comparison table
        output_dir = "reports"
        os.makedirs(output_dir, exist_ok=True)
        unique_filename = f"comparison_report_{uuid.uuid4()}.xlsx"
        output_filepath = os.path.abspath(os.path.join(output_dir, unique_filename))

        create_comparison_report(comparison_table, output_filepath)

        print(f"--- Comparison report generated at: {output_filepath} ---")

        # Prepare metadata display
        workflow_info = metadata.get('workflow', 'legacy')
        sdk_version = metadata.get('sdk_version', 'N/A')

        if workflow_info == 'sequential_coordination':
            workflow_display = f"ReceptionistAgentSDK → ExpertAgentSDK (SDK v{sdk_version})"
        else:
            workflow_display = "Receptionist → Expert (legacy v0.62.0)"

        files_used_md = (
            f"Files used to create shown report:\n\n"
            f"**Appliance Schedule:** `{schedule_filename}`\n\n"
            f"**Shop Drawing:** `{drawing_filename}`\n\n"
            f"**Spec Sheet:** `{spec_filename}`\n\n"
            f"**Classification:** `{classification}`\n\n"
            f"**Agent Workflow:** {workflow_display}\n\n"
            f"**Log File:** `{metadata.get('log_file', 'N/A')}`"
        )

        print(f"=== Returning success: {output_filepath} ===")
        return [output_filepath, files_used_md, None, None, None, log_content]

    except Exception as e:
        error_message = f"An error occurred: {str(e)}"
        print(f"=== EXCEPTION in handle_user_openhands: {error_message} ===")
        import traceback
        traceback.print_exc()
        return [None, error_message, None, None, None, f"Error: {str(e)}\n{traceback.format_exc()}"]


def handle_user(schedule_file: gr.File, drawing_file: gr.File, spec_file: gr.File) -> list:
    """
    Handles user input, classifies files with a receptionist, then processes them,
    calls the expert agent, and returns multiple outputs as a list.

    This now uses the OpenHands implementation if available.
    """
    if not schedule_file or not drawing_file or not spec_file:
        return [None, "Please upload all three files.", None, None, None]

    # Use OpenHands implementation if available
    if OPENHANDS_AVAILABLE:
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)

        return loop.run_until_complete(
            handle_user_openhands(schedule_file, drawing_file, spec_file)
        )

    # Legacy implementation (fallback)
    try:
        # --- Step 1: Call the Receptionist Agent to Classify Files ---
        classification = run_receptionist_agent(schedule_file, drawing_file, spec_file)

        if classification == 'unsupported':
            error_message = "Receptionist Agent: I do not have an expert for this type of equipment, or there was an error reading the files. This agent currently only handles boiler systems and fan coils."
            print(f"--- {error_message} ---")
            return [None, error_message, None, None, None, ""]
        elif classification == 'mismatched':
            error_message = "Receptionist Agent: The uploaded files appear to be mismatched. Please ensure all three documents (schedule, drawing, and spec) are for the same piece of equipment."
            print(f"--- {error_message} ---")
            return [None, error_message, None, None, None, ""]
        else:  # One of the valid classifications
            print(f"--- Receptionist declares classification as {classification} ---")
            

        # --- Step 2: Proceed with full processing for the detected classification ---
        
        # Store original filenames
        schedule_filename = os.path.basename(get_file_path(schedule_file))
        drawing_filename = os.path.basename(get_file_path(drawing_file))
        spec_filename = os.path.basename(get_file_path(spec_file))

        # --- Local File Processing ---
        def _process_file(file_path: str, file_label: str) -> str:
            """Helper to process a single file and return its text content."""
            print(f"--- Processing {file_label}: {file_path} ---")
            if not os.path.exists(file_path):
                raise FileNotFoundError(f"File not found at: {file_path}")
                
            file_extension = os.path.splitext(file_path)[1].lower()
            
            try:
                if file_extension == '.pdf':
                    with fitz.open(file_path) as doc:
                        text = "\n".join(page.get_text(sort=True) for page in doc)
                    return text
                
                elif file_extension == '.docx':
                    doc = docx.Document(file_path)
                    text = "\n".join([para.text for para in doc.paragraphs])
                    return text

                elif file_extension == '.png':
                    # --- Advanced OCR Pre-processing for Screenshots using Otsu's Binarization ---
                    with Image.open(file_path) as img:
                        # 1. Convert Pillow Image to OpenCV format (NumPy array) in grayscale.
                        open_cv_image = np.array(img.convert('L'))

                        # 2. Resize the image. For small text, making it larger is crucial.
                        # A factor of 3x is a good starting point.
                        new_width = int(open_cv_image.shape[1] * 3)
                        new_height = int(open_cv_image.shape[0] * 3)
                        resized_img = cv2.resize(open_cv_image, (new_width, new_height), interpolation=cv2.INTER_CUBIC)

                        # 3. Apply Otsu's Binarization.
                        # This automatically determines the best global threshold value to separate
                        # text from the background, which is ideal for screenshots.
                        _, binarized_img = cv2.threshold(
                            resized_img,
                            0, # Threshold value (is ignored when using Otsu's method)
                            255, # Max value to assign to pixels
                            cv2.THRESH_BINARY + cv2.THRESH_OTSU # Use Otsu's algorithm
                        )
                        
                        # 4. Perform OCR on the processed image
                        text = pytesseract.image_to_string(binarized_img)
                    
                    print(f"--- Raw OCR output for {file_label} (PNG): ---\n{text}\n----------------------------------------------------")
                    return text
                else:
                    raise ValueError(f"Unsupported file type for {file_label}: {file_extension}. Please use PDF, DOCX, or PNG.")
            except Exception as e:
                # Catch and re-throw errors from extractors to be handled by the main try/except block
                raise RuntimeError(f"Error processing {file_label}: {e}") from e

        # 1. Extract the full text from files
        schedule_text_full = _process_file(get_file_path(schedule_file), "Appliance Schedule")
        drawing_text = _process_file(get_file_path(drawing_file), "Shop Drawing")
        spec_text = _process_file(get_file_path(spec_file), "Spec Sheet")

        # 2. Trim the schedule text to only the relevant sections
        schedule_text = extract_schedule_section(schedule_text_full, classification)

        # # --- Debugging Step: Save the extracted text to a file ---
        # debug_filepath = os.path.abspath(os.path.join("reports", "debug_schedule_extraction.txt"))
        # try:
        #     with open(debug_filepath, 'w', encoding='utf-8') as f:
        #         f.write(schedule_text)
        #     print(f"--- Debug file saved to: {debug_filepath} ---")
        # except Exception as e:
        #     print(f"--- Warning: Could not write debug file. Error: {e} ---")
        # # ---------------------------------------------------------

        # 3. Call the simplified agent to get the comparison
        comparison_markdown = expert_agent(schedule_text, drawing_text, spec_text, classification)
        

        # 4. Create the final Excel report
        output_dir = "reports"
        os.makedirs(output_dir, exist_ok=True)
        # Generate a unique filename to prevent race conditions
        unique_filename = f"comparison_report_{uuid.uuid4()}.xlsx"
        output_filepath = os.path.abspath(os.path.join(output_dir, unique_filename))
        
        create_comparison_report(comparison_markdown, output_filepath)
        
        print(f"--- Comparison report generated at: {output_filepath} ---")

        # 5. Prepare the outputs for the UI
        files_used_md = (
            f"Files used to create shown report:\n\n"
            f"**Appliance Schedule:** `{schedule_filename}`\n\n"
            f"**Shop Drawing:** `{drawing_filename}`\n\n"
            f"**Spec Sheet:** `{spec_filename}`"
        )
        
        # Return values in the same order as the `outputs` list
        return [output_filepath, files_used_md, None, None, None, "Legacy implementation - no agent logs available"]

    except Exception as e:
        # Return a user-friendly error message
        error_message = f"An error occurred: {str(e)}"
        print(error_message)
        return [None, error_message, None, None, None, f"Error: {str(e)}"]
    



async def handle_search_interaction_openhands(
    user_query: str,
    history: list,
    enable_verification: bool = False,
    target_count: int = 10
):
    """
    OpenHands-based search interaction handler.

    Attempts to use SDK-based agent (openhands-sdk v1.2.0) first,
    then falls back to legacy LLM wrapper (openhands-ai),
    then to the original implementation.

    Args:
        user_query: User's search query
        history: Chat history
        enable_verification: If True, uses verification with sub-agents
        target_count: Number of verified documents to find (when verification enabled)
    """
    if not OPENHANDS_AVAILABLE:
        # Fallback to legacy implementation
        return await handle_search_interaction_legacy(user_query, history)

    # Convert history format: list of [user, agent] -> list of tuples (user, agent)
    conversation_history = []
    for item in history:
        if len(item) >= 2:
            conversation_history.append((item[0], item[1]))

    # Try SDK-based agent first (preferred - uses native MCP)
    try:
        from openhands_poc.agents import SearchAgentSDK, is_sdk_available

        if is_sdk_available():
            print("[SearchHandler] Using SDK-based SearchAgent (openhands-sdk v1.2.0)")
            search_agent = SearchAgentSDK()

            # Choose workflow based on verification toggle
            if enable_verification:
                # Use verification workflow with sub-agents (DelegateTool)
                print(f"[SearchHandler] Verification enabled, target_count={target_count}")
                agent_reply, metadata = await search_agent.search_with_verification_async(
                    user_query=user_query,
                    target_count=target_count,
                    conversation_history=conversation_history,
                )
            else:
                # Simple search without verification
                agent_reply, metadata = await search_agent.search_async(
                    user_query,
                    conversation_history=conversation_history,
                    target_count=target_count,
                )

            # Update history
            history.append([user_query, agent_reply])
            return history, history, ""

    except Exception as e:
        import traceback
        print(f"[SearchHandler] SDK agent failed: {e}")
        print(f"[SearchHandler] Error type: {type(e).__name__}")
        print(f"[SearchHandler] Full traceback:")
        traceback.print_exc()
        print("[SearchHandler] Falling back to legacy...")

    # Fallback to legacy LLM wrapper agent
    try:
        from openhands_poc.agents import SearchAgent

        print("[SearchHandler] Using legacy SearchAgent (openhands-ai LLM wrapper)")
        search_agent = SearchAgent()

        # Choose workflow based on verification toggle
        if enable_verification:
            # NEW: Use verification workflow with sub-agents
            agent_reply, metadata = await search_agent.search_with_verification_async(
                user_query=user_query,
                target_count=target_count,
                conversation_history=conversation_history,
                wave_size=15,
                max_documents_to_check=100
            )
        else:
            # Original: Simple search without verification
            agent_reply, metadata = await search_agent.search_async(
                user_query,
                conversation_history=conversation_history,
                max_iterations=3
            )

        # Update history
        history.append([user_query, agent_reply])

        return history, history, ""

    except Exception as e:
        error_message = f"Search error: {str(e)}"
        print(error_message)
        import traceback
        traceback.print_exc()

        history.append([user_query, error_message])
        return history, history, ""


async def handle_search_interaction_legacy(user_query: str, history: list):
    """
    Legacy search handler (original implementation).
    Handles the user's search query by orchestrating the Everything Search Agent.
    This version launches the server as a subprocess with the correct environment.
    """
    # 1. Append user message to history for display
    history.append([user_query, None])

    # 2. Construct messages for the LLM call
    system_prompt = (
        "When the user asks you to find files, formulate a query and use the 'search' tool. Remember you can only make one query per prompt. "
        "The results of the query will be returned to you to make readable if needed and filtered to ensure it's only what the user actually wants. "
        "The user will want the file path, name, and type, and potentially when it was created, modified, or accessed, depending on the prompt. "
        "Remember these acronyms are always used instead of full phrases in the project folders: BC - British Columbia office, HC - health care sector, RE - residential sector, "
        "TF - tenant fit-out sector, ED - education sector, MC - mission critical (data centers) sector, HP - hospitality sector, "
        "NOR - North Bay office, ST - science and technology sector, TT - transit and transportation sector, NCR - Ottawa office, "
        "CM - commercial sector, CX - commissioning sector, SD - shop drawings. "
        "There are two kinds of project notations: Office-last2DigitsOfYear-sector-yearProjectNumber, or sector-last2DigitsOfYear-yearProjectNumber. "
        "DON'T use '*' ever. If looking for certain project categories, use 'path: what you expect to find in project notation' to search for certain folders. "
        "Include years when searching for sectors; if the user doesn't specify a year, OR (|) over all years to increase search accuracy (ex: <Name> <RE-23|23-RE|RE-22|22-RE...>). "
        "Remember: use a space instead of AND, | instead of OR, and <> brackets for grouping, not (). "
        "All spaces will be used as AND statements. "
        "If you find no results: tell the user what your query was and that you found nothing."
    )
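
    # Illustrative query the rules above aim for (hypothetical): a request like
    # "health care proposals from 2023" should yield something such as
    #   proposal <HC-23|23-HC>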

    messages = [{"role": "system", "content": system_prompt}]
    # Add past conversation to messages
    for user_msg, agent_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if agent_msg:
            messages.append({"role": "assistant", "content": agent_msg})

    # 3. Call the LLM with tool specifications
    response = chat_with_functions(messages, tools=EVERYTHING_SEARCH_TOOL_SPEC)
    response_message = response["choices"][0]["message"]

    # 4. Check if the model wants to call a tool
    if response_message.get("tool_calls"):
        messages.append(response_message)  # Append the assistant's message with tool_calls
        tool_calls = response_message["tool_calls"]
        
        for tool_call in tool_calls:
            function_name = tool_call["function"]["name"]
            print("Tool call: ", tool_call)

            if function_name == "search":
                print(f"--- Executing search tool with args: {tool_call['function']['arguments']} ---")
                tool_output_str = ""
                try:
                    os.environ.pop('HTTP_PROXY', None)
                    os.environ.pop('HTTPS_PROXY', None)
                    args_string = tool_call["function"]["arguments"]
                    args = json.loads(args_string)
                    
                    sdk_path = os.getenv("EVERYTHING_SDK_PATH", "C:\\Everything-SDK\\dll\\Everything64.dll")
                    env = os.environ.copy()
                    env["EVERYTHING_SDK_PATH"] = sdk_path

                    transport = StdioTransport(command="uvx", args=["mcp-server-everything-search"], env=env)
                    client = Client(transport)
                    
                    tool_args = {"query": args["query"], "max_results": args.get("max_results", 100)}
                    wrapped_args = {"base": tool_args}
                    
                    tool_response = None
                    async with client:
                        tool_response = await client.call_tool("search", wrapped_args)
                    print("Response: ", tool_response)
                    
                    if tool_response and tool_response.content:
                        for content_block in tool_response.content:
                            if hasattr(content_block, 'text'):
                                tool_output_str += content_block.text + "\n"
                    tool_output_str = tool_output_str.strip()
                    print("tool output string", tool_output_str)
                except Exception as e:
                    tool_output_str = f"An error occurred while executing the search tool: {str(e)}"
                    print(tool_output_str)

                messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call["id"],
                    "name": function_name,
                    "content": tool_output_str or "No results found.",
                })
                print(f"Appended tool message for call id: {tool_call['id']}")
            else:
                messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call["id"],
                    "name": function_name,
                    "content": f"Unknown tool: {function_name}",
                })


        try:
            print("--- Calling LLM with all tool responses ---")
            final_response = chat_with_functions(messages)
            print("Agent's final response: ", final_response)
            agent_reply = final_response["choices"][0]["message"]["content"]
        except Exception as e:
            agent_reply = f"An error occurred after tool execution: {str(e)}"
    else:
        # The model responded directly without using a tool
        agent_reply = response_message["content"]

    # 5. Update history and return
    history[-1][1] = agent_reply
    return history, history, "" # Return updated history for chatbot, updated state, and clear the textbox


async def handle_search_interaction(
    user_query: str,
    history: list,
    enable_verification: bool = False,
    target_count: int = 10
):
    """
    Main search interaction handler.
    Uses OpenHands SearchAgent if available, otherwise falls back to legacy.

    Args:
        user_query: User's search query
        history: Chat history
        enable_verification: Enable document verification with sub-agents
        target_count: Number of verified documents to find
    """
    if OPENHANDS_AVAILABLE:
        return await handle_search_interaction_openhands(
            user_query, history, enable_verification, target_count
        )
    else:
        return await handle_search_interaction_legacy(user_query, history)


def clear_chat():
    """Clears the chatbot history, window, and input box."""
    return [], [], ""


with gr.Blocks() as demo:
    gr.Markdown("""
    # Consulting Assistant
    """)
    gr.Markdown("---")
    gr.Markdown(
    """
    ## Shop Drawing Comparison Agent Network
    Upload your appliance schedule, shop drawing, and spec sheet to get a comparison report. All files must be .pdf, .docx, or .png;
    .pdf and .docx files give the most accurate readings.
    """)
    with gr.Row():
        schedule_file = gr.File(label="Appliance Schedule", file_types=['.pdf', '.docx', '.png'])
        drawing_file = gr.File(label="Shop Drawing", file_types=['.pdf', '.docx', '.png'])
        spec_file = gr.File(label="Spec Sheet", file_types=['.pdf', '.docx', '.png'])

    submit_button = gr.Button("Generate Comparison Report")
    output_report = gr.File(label="Comparison Report")
    used_files_display = gr.Markdown()

    # Agent Logs Section (Collapsible)
    with gr.Accordion("Agent Workflow Logs", open=False):
        gr.Markdown("""
        View detailed logs of the agent workflow including:
        - Classification process
        - Subagent delegation (Receptionist → Expert)
        - File extraction statistics
        - LLM calls and responses
        """)
        agent_logs_display = gr.Textbox(
            label="Agent Logs",
            lines=20,
            max_lines=50,
            interactive=False,
            placeholder="Agent logs will appear here after processing...",
        )

    # Use async function directly when OpenHands available, otherwise use sync wrapper
    if OPENHANDS_AVAILABLE:
        submit_button.click(
            handle_user_openhands,
            inputs=[schedule_file, drawing_file, spec_file],
            outputs=[output_report, used_files_display, schedule_file, drawing_file, spec_file, agent_logs_display]
        )
    else:
        submit_button.click(
            handle_user,
            inputs=[schedule_file, drawing_file, spec_file],
            outputs=[output_report, used_files_display, schedule_file, drawing_file, spec_file, agent_logs_display]
        )

    # --- Everything Search Agent UI ---
    gr.Markdown("---")
    gr.Markdown("## Everything File Search Agent")

    search_history = gr.State([])
    chatbot = gr.Chatbot(label="Search Results")

    with gr.Row():
        search_query_box = gr.Textbox(
            label="Ask about files...",
            placeholder="e.g., Find all project proposals in the health care sector started last year",
            scale=4
        )

    # Verification Controls
    with gr.Row():
        enable_verification = gr.Checkbox(
            label="Enable Document Verification (uses sub-agents to verify matches)",
            value=False,
            info="When enabled, spawns verifier sub-agents to check if documents match criteria"
        )
        target_count = gr.Number(
            label="Target Count",
            value=10,
            minimum=1,
            maximum=50,
            step=1,
            info="Number of verified documents to find"
        )

    with gr.Row():
        search_button = gr.Button("Search Files", variant="primary")
        clear_button = gr.Button("Clear")

    gr.on(
        triggers=[search_button.click, search_query_box.submit],
        fn=handle_search_interaction,
        inputs=[search_query_box, search_history, enable_verification, target_count],
        outputs=[chatbot, search_history, search_query_box]
    )

    clear_button.click(
        fn=clear_chat,
        inputs=None,
        outputs=[chatbot, search_history, search_query_box]
    )


if __name__ == "__main__":
    demo.launch()