# Evaluation entry point (source snapshot: 2,073 bytes, commit 4b4ea7f)
import argparse
from src.utils import read_file, check_data_validity
from src.layout_evaluation import evaluate_layout
from src.table_evaluation import evaluate_table
def parse_args(argv=None):
    """Parse command-line arguments for the evaluation script.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``,
            in which case argparse reads ``sys.argv[1:]`` (so existing
            callers are unaffected); pass an explicit list for testing.

    Returns:
        argparse.Namespace with ``ref_path``, ``pred_path``,
        ``ignore_classes_for_layout`` (list of class-name strings),
        and ``mode`` ("layout" or "table").
    """
    parser = argparse.ArgumentParser(description="Arguments for evaluation")
    parser.add_argument(
        "--ref_path",
        # For Layout
        type=str, default=r"D:\Novalad\codes\eval\dp-bench\dataset\reference_table.json",
        # For Table
        #type=str, default=r"D:\Novalad\codes\eval\dp-bench\dataset\reference_novalad.json",
        help="Path to the ground truth file"
    )
    parser.add_argument(
        "--pred_path",
        type=str, default=r"D:\Novalad\codes\eval\dp-bench\dataset\sample_results\novalad.json",
        help="Path to the prediction file"
    )
    parser.add_argument(
        "--ignore_classes_for_layout",
        # BUG FIX: the original used ``type=list``, which makes argparse call
        # list() on the raw string ("table" -> ['t','a','b','l','e']).
        # ``nargs="*"`` collects zero or more space-separated class names.
        nargs="*", default=[],  # For layout : ["table", "chart"], for table : []
        help="List of layout classes to ignore. This is used only for layout evaluation."
    )
    parser.add_argument(
        "--mode",
        type=str, default="table",
        help="Mode for evaluation (layout/table)"
    )
    return parser.parse_args(argv)
def main():
    """Load reference/prediction files and run the selected evaluation.

    Prints the parsed arguments, validates the two data files against each
    other, then dispatches on ``--mode``: "layout" reports an NID score,
    "table" reports TEDS and TEDS-S scores. Any other mode raises ValueError.
    """
    args = parse_args()

    # Echo the effective configuration before doing any work.
    print("Arguments:")
    for name, value in vars(args).items():
        print(f"  {name}: {value}")
    print("-" * 50)

    reference = read_file(args.ref_path)
    prediction = read_file(args.pred_path)
    check_data_validity(reference, prediction)

    if args.mode == "layout":
        score = evaluate_layout(
            reference, prediction,
            ignore_classes=args.ignore_classes_for_layout,
        )
        print(f"NID Score: {score:.4f}")
    elif args.mode == "table":
        teds_score, teds_s_score = evaluate_table(reference, prediction)
        print(f"TEDS Score: {teds_score:.4f}")
        print(f"TEDS-S Score: {teds_s_score:.4f}")
    else:
        raise ValueError(f"{args.mode} mode not supported")
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()