Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Empty file added infinimetrics/__init__.py
Empty file.
Empty file added infinimetrics/tools/__init__.py
Empty file.
Empty file.
131 changes: 131 additions & 0 deletions infinimetrics/tools/benchmark/model_summary

Large diffs are not rendered by default.

25 changes: 25 additions & 0 deletions infinimetrics/tools/benchmark/op_library_mock.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
#!/usr/bin/env python3
import time
import random

def benchmark_operator(op_type: str, fqn_name: str, input_shape: list):
    """
    Mock operator-library interface.

    Pretends to benchmark a single operator and returns a simulated latency.

    Args:
        op_type: Operator class name, e.g. "Conv2d" (unused by the mock).
        fqn_name: Fully qualified operator name (unused by the mock).
        input_shape: Input tensor shape for the operator (unused by the mock).

    Returns:
        dict with:
            "actual_latency_ms" (float): simulated latency, rounded to 4 decimals;
            "status" (str): always "SUCCESS" for the mock.
    """

    # 1. Simulate work (e.g., 0.01 to 0.1 seconds)
    # TODO: replace this simulated measurement with a real library call such as
    #       `actual_latency = infiniCore.benchmark(op_type, fqn_name, input_shape)`.
    mock_duration_sec = random.uniform(0.01, 0.1)
    time.sleep(mock_duration_sec)

    # 2. Convert seconds to milliseconds and round to 4 decimal places
    mock_latency_ms = round(mock_duration_sec * 1000, 4)

    # 3. Return measurement results
    return {
        "actual_latency_ms": mock_latency_ms,
        "status": "SUCCESS"
    }
142 changes: 142 additions & 0 deletions infinimetrics/tools/benchmark/parse_summary.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,142 @@
#!/usr/bin/env python3
import re, json, ast, argparse, sys

def parse_shape_string(shape_str: str):
    """Parse a shape literal such as '[80, 64, 768]' into a list of ints.

    Returns None when the string is not a valid list/tuple literal.
    """
    try:
        value = ast.literal_eval(shape_str)
    except (ValueError, SyntaxError):
        return None
    return list(value) if isinstance(value, (list, tuple)) else None

def parse_number_string(num_str: str) -> int:
    """Convert a comma-grouped number string like '3,087,790,080' to an int.

    The placeholder '--' (possibly padded with whitespace) maps to 0.
    """
    cleaned = num_str.strip()
    if cleaned == '--':
        return 0
    return int(cleaned.replace(',', ''))

def parse_model_summary_hierarchical(file_path: str) -> list:
    """
    Parses the model summary file, extracts all operators in order of appearance,
    and generates a 'Fully Qualified Name' (FQN) for each.
    """
    # NOTE(review): assumes a torchinfo-style table whose row columns are
    # "Layer (type)  input_shape  output_shape  params  mult-adds" — confirm
    # against whatever tool generates `model_summary`.

    operator_test_cases = []
    # First column: "LayerType (local_name)".
    layer_name_regex = re.compile(r'([A-Za-z0-9_]+)\s*\(([a-zA-Z0-9_]+)\)')
    # Leading tree-drawing characters/whitespace; its LENGTH encodes tree depth.
    prefix_regex = re.compile(r'^[│└├\s-]*')
    # Full row: layer info followed by 4 columns ('[..]' shapes or '--', numbers or '--').
    line_regex = re.compile(
        r'^(?P<layer_info>.*?)\s+'
        r'(?P<input_shape>\[.*?\]|--)\s+'
        r'(?P<output_shape>\[.*?\]|--)\s+'
        r'(?P<params>[\d,]+|--)\s+'
        r'(?P<mult_adds>[\d,]+|--)$'
    )

    # Parallel stacks mirroring the indentation hierarchy:
    # indent_stack holds the prefix width per depth (-1 is a virtual root
    # sentinel so the while-loop below never empties it); path_stack holds
    # the corresponding local names on the path from the root.
    indent_stack = [-1]
    path_stack = []

    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            # Skip separator rules and the table header line.
            if line.startswith('===') or line.startswith('Layer (type'):
                continue
            line_str = line.strip()
            if not line_str:
                continue

            # Depth is measured on the RAW line, before stripping.
            prefix_match = prefix_regex.match(line)
            current_indent = len(prefix_match.group(0))
            line_match = line_regex.match(line_str)
            if not line_match:
                continue

            layer_info_str = line_match.group('layer_info')
            name_match = layer_name_regex.search(layer_info_str)
            if not name_match:
                continue

            op_type = name_match.group(1)
            local_name = name_match.group(2)

            # Unwind finished siblings/ancestors at the same or deeper indent.
            while current_indent <= indent_stack[-1]:
                indent_stack.pop()
                path_stack.pop()

            # Push BEFORE the mult-adds skip below: container rows emit no
            # test case but still contribute to their children's FQN path.
            path_stack.append(local_name)
            indent_stack.append(current_indent)

            # Rows without mult-adds are hierarchy-only containers; skip them.
            mult_adds_str = line_match.group('mult_adds').strip()
            if mult_adds_str == '--' or parse_number_string(mult_adds_str) == 0:
                continue

            # Drop the root model name so FQNs match PyTorch module paths.
            if len(path_stack) > 1:
                fully_qualified_name = ".".join(path_stack[1:])
            else:
                fully_qualified_name = local_name

            input_shape_str = line_match.group('input_shape').strip()
            output_shape_str = line_match.group('output_shape').strip()
            input_shape = parse_shape_string(input_shape_str)
            output_shape = parse_shape_string(output_shape_str)
            mult_adds = parse_number_string(mult_adds_str)

            operator_test_cases.append({
                "fully_qualified_name": fully_qualified_name,
                "op_type": op_type,
                "local_name": local_name,
                "input_shape": input_shape,
                "output_shape": output_shape,
                "mult_adds": mult_adds
            })

    return operator_test_cases

def main():
    """
    CLI entry point: convert a model summary (txt) into operator test cases (JSON).

    Exits with status 1 when the input file is missing or parsing fails.
    """
    parser = argparse.ArgumentParser(
        description="Parses model summary (txt) into operator test cases (json).",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "-i", "--input-file",
        default="model_summary",
        help="Path to the input model summary file."
    )
    parser.add_argument(
        "-o", "--output-file",
        default="operator_test_cases.json",
        help="Path to the output operator test cases (JSON) file."
    )

    args = parser.parse_args()

    try:
        # No placeholders here, so a plain string (not an f-string) suffices.
        print("--- 1. Parsing Phase ---")
        print(f"Parsing {args.input_file} ...")

        test_cases = parse_model_summary_hierarchical(args.input_file)

        print(f"Parsing complete! Found {len(test_cases)} operator instances in sequential order.")

        with open(args.output_file, 'w', encoding='utf-8') as f:
            json.dump(test_cases, f, indent=4)

        print(f"Test cases saved to: {args.output_file}")

    except FileNotFoundError:
        # Diagnostics go to stderr so stdout stays clean for pipelines.
        print(f"Error: File {args.input_file} not found.", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"An error occurred during parsing: {e}", file=sys.stderr)
        sys.exit(1)
123 changes: 123 additions & 0 deletions infinimetrics/tools/benchmark/run_benchmark.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,123 @@
#!/usr/bin/env python3

import json
import argparse
import sys
from datetime import datetime

try:
from . import op_library_mock as op_library
# import op_library_real as op_library # (Future real call)
except ImportError:
# Fallback for running script directly without -m
import op_library_mock as op_library


def run_operator_tests(test_cases: list, benchmark_fn=None) -> list:
    """
    Iterate through test cases, call the operator library, and return a list
    of per-case results (original config merged with measurement data).

    Args:
        test_cases: List of case dicts produced by parse_summary.py; each is
            expected to carry "fully_qualified_name", "op_type" and
            "input_shape".
        benchmark_fn: Optional benchmark callable with the same keyword
            signature as op_library.benchmark_operator. Defaults to the
            configured operator library; injectable for testing or for
            swapping in a real backend without editing this function.

    Returns:
        One dict per input case: the case's own fields plus
        "actual_latency_ms" (float, or None on failure) and "status".
    """
    if benchmark_fn is None:
        benchmark_fn = op_library.benchmark_operator

    results_list = []
    total_cases = len(test_cases)

    print("\n" + "="*60)
    print(f"🚀 Starting operator performance test ({total_cases} total)...")
    print("="*60 + "\n")

    for i, case in enumerate(test_cases):
        # Use .get() so a malformed case cannot raise OUTSIDE the try below
        # and abort the whole run (the original indexed the dict directly).
        fqn_name = case.get("fully_qualified_name", "<unknown>")
        op_type = case.get("op_type")
        input_shape = case.get("input_shape")

        print(f"--- Running ({i+1}/{total_cases}): {fqn_name} ---")

        try:
            # 1. Call the underlying operator library
            result = benchmark_fn(
                op_type=op_type,
                fqn_name=fqn_name,
                input_shape=input_shape
                # (In the future we may pass more parameters, e.g., "precision=...")
            )

            # 2. Merge "original config" and "measurement results"
            case_result = case.copy()
            case_result.update(result)

            print(f" > Status: {result['status']}, Latency: {result['actual_latency_ms']} ms\n")

        except Exception as e:
            # Ensure robustness: a single operator failure should not terminate the entire test
            print(f" > Status: FAILED, Error: {e}\n")
            case_result = case.copy()
            case_result.update({
                "actual_latency_ms": None,
                "status": f"FAILED: {e}"
            })

        results_list.append(case_result)

    print("="*60)
    print("✅ All operator tests completed.")
    print("="*60)

    return results_list


def main():
    """
    Main function:
    1. (Load) Read "Test Case" JSON file.
    2. (Execute) Call the operator library to run tests.
    3. (Report) Merge "Config" and "Result", save as new "Test Result" JSON file.

    Exits with status 1 when loading or saving fails.
    """

    parser = argparse.ArgumentParser(
        description="Reads operator test cases (JSON) and executes benchmarks.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "-i", "--input-config",
        default="operator_test_cases.json",
        help="Path to the input 'Operator Test Cases' (JSON) file."
    )
    parser.add_argument(
        "-o", "--output-report",
        # NOTE: the timestamp is captured once, when the parser is built.
        default=f"operator_test_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
        help="Path to the output 'Operator Test Results' (JSON) file."
    )

    args = parser.parse_args()

    # --- 1. (Load) ---
    try:
        print("--- 1. Loading Test Configuration ---")
        with open(args.input_config, 'r', encoding='utf-8') as f:
            test_cases = json.load(f)
        print(f"Successfully loaded {len(test_cases)} test cases (from {args.input_config})")

    except FileNotFoundError:
        # Errors go to stderr so stdout remains the progress log.
        print(f"Error: Test case file {args.input_config} not found.", file=sys.stderr)
        print("Did you run `parse_summary.py` first to generate it?", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error occurred during loading: {e}", file=sys.stderr)
        sys.exit(1)

    # --- 2. (Execute) ---
    results_with_data = run_operator_tests(test_cases)

    # --- 3. (Report) ---
    try:
        print("--- 3. Saving Final Report ---")
        with open(args.output_report, 'w', encoding='utf-8') as f:
            json.dump(results_with_data, f, indent=4)
        print(f"Final assessment report (including latency) saved to: {args.output_report}")
    except Exception as e:
        print(f"Error occurred while saving report: {e}", file=sys.stderr)
        sys.exit(1)
Empty file added tests/__init__.py
Empty file.
Empty file added tests/tools/__init__.py
Empty file.
Empty file.
49 changes: 49 additions & 0 deletions tests/tools/benchmark/test_op_library_mock.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
import unittest
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The unit-test portion does not feel strictly necessary right now, since the logic only checks input/output formats; if the format changes later, this part will need to be reworked as well.

import sys
import os

# --- Path Configuration ---
# Make the repository root importable so `infinimetrics.*` resolves even when
# this file is executed directly (not via `python -m unittest` from the root).
current_dir = os.path.dirname(os.path.abspath(__file__))
# FIX: this file lives at tests/tools/benchmark/, so the repository root is
# THREE levels up; the previous two dirname() calls only reached tests/.
project_root = os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

# Import module to be tested
from infinimetrics.tools.benchmark.op_library_mock import benchmark_operator

class TestOpLibraryMock(unittest.TestCase):
    """
    Unit tests for op_library_mock.benchmark_operator.
    """

    def test_benchmark_interface_contract(self):
        """
        Interface contract test: the returned structure must expose the fields
        a real operator library will later provide in its place.
        """
        outcome = benchmark_operator(
            op_type="Conv2d",
            fqn_name="layer1.0.conv1",
            input_shape=[1, 64, 56, 56]
        )

        # The contract: a dict carrying both required keys.
        self.assertIsInstance(outcome, dict)
        for required_key in ("status", "actual_latency_ms"):
            self.assertIn(required_key, outcome)
        # Default mock behavior always reports success...
        self.assertEqual(outcome["status"], "SUCCESS")
        # ...with a floating-point latency.
        self.assertIsInstance(outcome["actual_latency_ms"], float)

    def test_latency_simulation_logic(self):
        """Simulated latencies must stay within the documented window."""
        for _ in range(5):
            ms = benchmark_operator("Test", "test_op", [])["actual_latency_ms"]
            # The mock sleeps 0.01s-0.1s, i.e. 10ms-100ms.
            self.assertTrue(10 <= ms <= 100, f"Latency {ms}ms is unexpectedly out of range")

if __name__ == '__main__':
    unittest.main()
40 changes: 40 additions & 0 deletions tests/tools/benchmark/test_parse_summary.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
import unittest
import sys
import os

# --- Path Configuration (Boilerplate) ---
# Allow running this script directly or via python -m unittest
current_dir = os.path.dirname(os.path.abspath(__file__))
# FIX: the repository root is THREE levels above tests/tools/benchmark/; the
# previous two dirname() calls stopped at tests/, which broke direct runs of
# `from infinimetrics...` imports.
project_root = os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

# Import module to be tested
from infinimetrics.tools.benchmark.parse_summary import parse_shape_string, parse_number_string

class TestModelParser(unittest.TestCase):
    """
    Unit tests for the pure parsing helpers in parse_summary.py.
    """

    def test_parse_shape_string_valid(self):
        """Well-formed shape literals parse into lists of ints."""
        expectations = {
            '[80, 64, 768]': [80, 64, 768],
            '[1, 128]': [1, 128],
        }
        for text, expected in expectations.items():
            self.assertEqual(parse_shape_string(text), expected)

    def test_parse_shape_string_edge_cases(self):
        """Malformed input yields None; an empty literal yields []."""
        self.assertIsNone(parse_shape_string('invalid'))
        self.assertIsNone(parse_shape_string('[1, 2'))  # Missing closing bracket
        self.assertEqual(parse_shape_string('[]'), [])  # Empty list

    def test_parse_number_string(self):
        """Comma-grouped numbers convert to int; '--' maps to 0."""
        self.assertEqual(parse_number_string('3,087,790'), 3087790)
        self.assertEqual(parse_number_string('100'), 100)
        # Special placeholder, with and without surrounding whitespace.
        self.assertEqual(parse_number_string('--'), 0)
        self.assertEqual(parse_number_string(' -- '), 0)

if __name__ == '__main__':
    unittest.main()
Loading