sahancpal committed on
Commit
4d2a406
·
verified ·
1 Parent(s): b950166

Upload view_parquet.py

Browse files
Files changed (1) hide show
  1. view_parquet.py +151 -0
view_parquet.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+ import pandas as pd
4
+ import argparse
5
+ from pathlib import Path
6
+ from collections import Counter
7
+
8
+
9
def analyze_parquet(parquet_file, rows_to_display=10):
    """View and analyze the operator_input_models parquet file.

    Prints a dataset overview, unique counts, per-source model breakdown,
    top operators/models by usage, and a sample of the rows.

    Args:
        parquet_file: Path (or str) to the parquet file. Expected columns:
            'operator name', 'used in model', 'args'.
        rows_to_display: Number of sample rows to print (default 10).

    Returns:
        Tuple of (the full DataFrame, dict mapping source name ->
        list of unique model names belonging to that source).
    """
    print(f"Reading parquet file: {parquet_file}")
    df = pd.read_parquet(parquet_file)

    print(f"\n{'='*80}")
    print("DATASET OVERVIEW")
    print(f"{'='*80}")
    print(f"Total rows: {len(df):,}")
    print(f"Columns: {list(df.columns)}")
    print(f"Memory usage: {df.memory_usage(deep=True).sum() / 1024**2:.2f} MB")

    print(f"\n{'='*80}")
    print("UNIQUE COUNTS")
    print(f"{'='*80}")
    print(f"Unique operators: {df['operator name'].nunique():,}")
    print(f"Unique models: {df['used in model'].nunique():,}")
    print(f"Unique argument configurations: {df['args'].nunique():,}")

    print(f"\n{'='*80}")
    print("MODEL SOURCE BREAKDOWN")
    print(f"{'='*80}")

    # Bucket each unique model by its benchmark-suite prefix.
    model_sources = {
        'HuggingFace': [],
        'TorchBench': [],
        'Timm': [],
        'Other': []
    }

    unique_models = df['used in model'].unique()

    for model in unique_models:
        if model.startswith('HuggingFace/'):
            model_sources['HuggingFace'].append(model)
        elif model.startswith('TorchBench/'):
            model_sources['TorchBench'].append(model)
        elif model.startswith('Timm/'):
            model_sources['Timm'].append(model)
        else:
            model_sources['Other'].append(model)

    # Print per-source statistics.
    for source, models in model_sources.items():
        if models:
            print(f"\n{source}: {len(models)} models")
            # Count total rows per source
            source_rows = df[df['used in model'].isin(models)]
            print(f" - Total operator instances: {len(source_rows):,}")
            print(f" - Unique operators used: {source_rows['operator name'].nunique()}")
            # Show a few sample models (alphabetical) to keep output short.
            sample_models = sorted(models)[:5]
            for model in sample_models:
                print(f" • {model}")
            if len(models) > 5:
                print(f" ... and {len(models) - 5} more")

    print(f"\n{'='*80}")
    print("TOP OPERATORS BY USAGE")
    print(f"{'='*80}")
    operator_counts = df['operator name'].value_counts().head(10)
    for i, (op, count) in enumerate(operator_counts.items(), 1):
        print(f"{i:2}. {op:<50} {count:5} uses")

    print(f"\n{'='*80}")
    print("TOP MODELS BY OPERATOR COUNT")
    print(f"{'='*80}")
    model_counts = df['used in model'].value_counts().head(10)
    for i, (model, count) in enumerate(model_counts.items(), 1):
        print(f"{i:2}. {model:<50} {count:5} operators")

    print(f"\n{'='*80}")
    print(f"SAMPLE DATA (first {rows_to_display} rows)")
    print(f"{'='*80}")

    # Display sample with truncated args for readability.
    # Coerce to str first so non-string cells (e.g. NaN) don't crash len().
    sample_df = df.head(rows_to_display).copy()
    sample_df['args'] = sample_df['args'].apply(
        lambda x: str(x)[:100] + '...' if len(str(x)) > 100 else str(x)
    )

    # FIX: use option_context so display options are scoped to this print
    # instead of permanently mutating global pandas state for the process.
    with pd.option_context('display.max_columns', None,
                           'display.width', None,
                           'display.max_colwidth', 50):
        print(sample_df.to_string(index=False))

    return df, model_sources
97
+
98
+
99
def main():
    """CLI entry point: parse args, analyze the parquet file, apply filters.

    Returns:
        Process exit code: 0 on success, 1 if the input file is missing.
    """
    parser = argparse.ArgumentParser(description='View and analyze operator_input_models parquet file')
    parser.add_argument('--input', '-i',
                        default='operator_input_models_mapping.parquet',
                        help='Input Parquet file (default: operator_input_models_mapping.parquet)')
    parser.add_argument('--rows', '-r',
                        type=int,
                        default=10,
                        help='Number of sample rows to display (default: 10)')
    parser.add_argument('--query', '-q',
                        help='Filter by operator name (partial match)')
    parser.add_argument('--model', '-m',
                        help='Filter by model name (partial match)')

    args = parser.parse_args()

    # Check if input file exists before handing it to pandas.
    if not Path(args.input).exists():
        print(f"Error: Input file '{args.input}' not found")
        return 1

    # Analyze the parquet file
    df, model_sources = analyze_parquet(args.input, args.rows)

    # Apply filters if specified
    if args.query or args.model:
        print(f"\n{'='*80}")
        print("FILTERED RESULTS")
        print(f"{'='*80}")

        filtered_df = df.copy()

        # FIX: regex=False — the help text promises "partial match", but
        # str.contains defaults to regex=True, so operator names containing
        # regex metacharacters (e.g. 'add.Tensor(') would raise re.error.
        # na=False keeps NaN cells from breaking the boolean mask.
        if args.query:
            filtered_df = filtered_df[
                filtered_df['operator name'].str.contains(
                    args.query, case=False, regex=False, na=False)
            ]
            print(f"Filtering for operator containing: '{args.query}'")

        if args.model:
            filtered_df = filtered_df[
                filtered_df['used in model'].str.contains(
                    args.model, case=False, regex=False, na=False)
            ]
            print(f"Filtering for model containing: '{args.model}'")

        if len(filtered_df) > 0:
            print(f"\nFound {len(filtered_df)} matching entries")
            # Truncate long args strings so the table stays readable.
            sample_df = filtered_df.head(args.rows).copy()
            sample_df['args'] = sample_df['args'].apply(lambda x: x[:100] + '...' if len(x) > 100 else x)
            print(sample_df.to_string(index=False))
        else:
            print("No matching entries found")

    return 0
148
+
149
+
150
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    # raise SystemExit is preferred over the site-injected exit() builtin,
    # which is not available under `python -S` or in frozen executables.
    raise SystemExit(main())