feat: fixes and configuration for the Vivado path

This commit is contained in:
Saleh Bubshait
2025-12-17 18:16:44 +00:00
parent a4a9644f9e
commit abfd851c59
4 changed files with 130 additions and 93 deletions

View File

@@ -10,8 +10,13 @@ TEMP_VERILOG_FILE = "temp.v"
TEMP_TESTBENCH_FILE = "testbench.v"
TCL_SCRIPT_FILE = "run_testbench.tcl"
def write_tcl():
# Generate the TCL script for Vivado
# --- CONFIGURATION ---
# Default fallback path if env var is missing
DEFAULT_VIVADO_PATH = "/tools/Xilinx/Vivado/2023.1/bin"
# FIX 1: Add 'top_module' as an argument so the function can use it
def write_tcl(top_module):
# Generate the TCL script for Vivado
tcl_commands = f"""
create_project temp_project ./temp_project -force -part xc7z020clg400-1
set_property source_mgmt_mode All [current_project]
@@ -33,14 +38,15 @@ def extract_top_module_name(testbench_file):
for line in file:
match = re.search(r'\s*module\s+(\w+)\s*;', line)
if match:
print(match.group(1))
return match.group(1) # Extract module name
return None # Return None if no module found
def run_functional_correctness():
# Load JSON files
if not os.path.exists(SOLUTIONS_FILE):
print(f"Error: {SOLUTIONS_FILE} not found.")
return
with open(SOLUTIONS_FILE, "r", encoding="utf-8") as file:
solutions_data = json.load(file)
@@ -56,34 +62,31 @@ def run_functional_correctness():
if module_name and testbench_code:
module_testbenches[module_name] = testbench_code
# print(module_testbenches.keys())
# Get Vivado path from environment variable
vivado_path = os.environ.get("vivado")
if not vivado_path:
raise EnvironmentError("Vivado environment variable not set.")
vivado_path = os.path.join(vivado_path, "vivado.bat")
# FIX 2: Handle Linux Path correctly (No .bat)
vivado_path_env = os.environ.get("vivado")
if vivado_path_env:
vivado_bin = os.path.join(vivado_path_env, "vivado")
elif os.path.exists(os.path.join(DEFAULT_VIVADO_PATH, "vivado")):
vivado_bin = os.path.join(DEFAULT_VIVADO_PATH, "vivado")
else:
vivado_bin = "vivado" # Hope it's in PATH
print(vivado_bin)
# Iterate over solutions and test them
for model, categories in solutions_data.items():
for category, modules in categories.items():
for module_entry in modules:
module_name = module_entry["module"]
# print(module_name)
# print(module_name in module_testbenches.keys())
if module_name not in module_testbenches:
print(f"Skipping {module_name}: No testbench found.")
continue
testbench_code = module_testbenches[module_name]
solutions = module_entry["solutions"]
# Iterate over all solutions
for solution_entry in solutions:
for idx, solution_entry in enumerate(solutions):
verilog_code = solution_entry["solution"]
# Write the Verilog design to a file
@@ -101,31 +104,34 @@ def run_functional_correctness():
solution_entry["pass"] = "Error: Could not extract top module."
continue
print(f"Testing module: {module_name} (Top Module: {top_module})")
print(f"Testing {module_name} (Solution {idx+1})...")
write_tcl()
# FIX 3: Pass the variable to the function
write_tcl(top_module)
# Run Vivado in batch mode
print(f"Running Vivado simulation for {module_name}...")
process = subprocess.run([vivado_path, "-mode", "batch", "-source", TCL_SCRIPT_FILE], capture_output=True, text=True)
try:
process = subprocess.run([vivado_bin, "-mode", "batch", "-source", TCL_SCRIPT_FILE], capture_output=True, text=True)
output_log = process.stdout + "\n" + process.stderr
except FileNotFoundError:
print("CRITICAL ERROR: Vivado executable not found. Check path.")
return
# Capture output logs
output_log = process.stdout + "\n" + process.stderr
print(output_log)
test_passed = "All tests passed" in output_log
# Determine pass/fail status
if test_passed:
solution_entry["pass"] = "true"
print(f" ✅ PASS")
else:
# Extract relevant error messages
error_lines = "\n".join(line for line in output_log.split("\n") if "error" or "fail" in line.lower())
solution_entry["pass"] = error_lines if error_lines else "Test failed somehow"
solution_entry["pass"] = "false" # Keep it simple for now
print(f" ❌ FAIL")
print(f"Test result for {module_name}: {'PASS' if test_passed else 'FAIL'}")
# Save results after testing each module
# Save results incrementally
with open(SOLUTIONS_FILE, "w", encoding="utf-8") as file:
json.dump(solutions_data, file, indent=4)
print("All tests completed.")
if __name__ == "__main__":
run_functional_correctness()