novalad-evaluation / evaluate.py
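"""Evaluation entry point for the dp-bench style layout (NID) and table (TEDS / TEDS-S) metrics.

Example invocation (paths below are placeholders; point them at your local dataset copy):

    python evaluate.py \
        --ref_path dataset/reference.json \
        --pred_path dataset/sample_results/novalad.json \
        --mode layout \
        --ignore_classes_for_layout table chart
"""
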
import argparse
from src.utils import read_file, check_data_validity
from src.layout_evaluation import evaluate_layout
from src.table_evaluation import evaluate_table


def parse_args():
    parser = argparse.ArgumentParser(description="Evaluate layout or table predictions against a reference file")
    parser.add_argument(
        "--ref_path",
        # Layout evaluation: reference_table.json; table evaluation: reference_novalad.json
        type=str, default=r"D:\Novalad\codes\eval\dp-bench\dataset\reference_table.json",
        help="Path to the ground truth file"
    )
parser.add_argument(
"--pred_path",
type=str, default=r"D:\Novalad\codes\eval\dp-bench\dataset\sample_results\novalad.json",
help="Path to the prediction file"
)
    parser.add_argument(
        "--ignore_classes_for_layout",
        nargs="*", type=str, default=[],  # e.g. ["table", "chart"] for layout evaluation, [] for table evaluation
        help="Layout classes to ignore (space-separated). Used only for layout evaluation."
    )
    parser.add_argument(
        "--mode",
        type=str, default="table", choices=["layout", "table"],
        help="Evaluation mode (layout or table)"
    )
return parser.parse_args()


def main():
args = parse_args()
print("Arguments:")
for k, v in vars(args).items():
print(f" {k}: {v}")
print("-" * 50)
label_data = read_file(args.ref_path)
pred_data = read_file(args.pred_path)
check_data_validity(label_data, pred_data)
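    # Dispatch to the metric that matches the requested mode.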
if args.mode == "layout":
score = evaluate_layout(
label_data, pred_data,
ignore_classes=args.ignore_classes_for_layout,
)
print(f"NID Score: {score:.4f}")
elif args.mode == "table":
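        # TEDS compares table structure and cell content; TEDS-S is the structure-only variant.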
teds_score, teds_s_score = evaluate_table(label_data, pred_data)
print(f"TEDS Score: {teds_score:.4f}")
print(f"TEDS-S Score: {teds_s_score:.4f}")
else:
raise ValueError(f"{args.mode} mode not supported")


if __name__ == "__main__":
main()