Udayshankar Ravikumar committed on
Commit
5dce886
·
unverified ·
1 Parent(s): 77546b2

Updated with ranking feature.

Browse files
Files changed (27) hide show
  1. README.md +1 -1
  2. app.py +220 -202
  3. requirements.txt +3 -2
  4. surrogate_models_v2_json/model_crc32_ipc.json +0 -0
  5. surrogate_models_v2_json/model_crc32_ipc_meta.json +14 -0
  6. surrogate_models_v2_json/model_crc32_l2_miss_rate.json +0 -0
  7. surrogate_models_v2_json/model_crc32_l2_miss_rate_meta.json +14 -0
  8. surrogate_models_v2_json/model_dijkstra_ipc.json +0 -0
  9. surrogate_models_v2_json/model_dijkstra_ipc_meta.json +14 -0
  10. surrogate_models_v2_json/model_dijkstra_l2_miss_rate.json +0 -0
  11. surrogate_models_v2_json/model_dijkstra_l2_miss_rate_meta.json +14 -0
  12. surrogate_models_v2_json/model_fft_ipc.json +0 -0
  13. surrogate_models_v2_json/model_fft_ipc_meta.json +14 -0
  14. surrogate_models_v2_json/model_fft_l2_miss_rate.json +0 -0
  15. surrogate_models_v2_json/model_fft_l2_miss_rate_meta.json +14 -0
  16. surrogate_models_v2_json/model_matrix_mul_ipc.json +0 -0
  17. surrogate_models_v2_json/model_matrix_mul_ipc_meta.json +14 -0
  18. surrogate_models_v2_json/model_matrix_mul_l2_miss_rate.json +0 -0
  19. surrogate_models_v2_json/model_matrix_mul_l2_miss_rate_meta.json +14 -0
  20. surrogate_models_v2_json/model_qsort_ipc.json +0 -0
  21. surrogate_models_v2_json/model_qsort_ipc_meta.json +14 -0
  22. surrogate_models_v2_json/model_qsort_l2_miss_rate.json +0 -0
  23. surrogate_models_v2_json/model_qsort_l2_miss_rate_meta.json +14 -0
  24. surrogate_models_v2_json/model_sha_ipc.json +0 -0
  25. surrogate_models_v2_json/model_sha_ipc_meta.json +14 -0
  26. surrogate_models_v2_json/model_sha_l2_miss_rate.json +0 -0
  27. surrogate_models_v2_json/model_sha_l2_miss_rate_meta.json +14 -0
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🧠
4
  colorFrom: blue
5
  colorTo: green
6
  sdk: gradio
7
- sdk_version: 5.42.0
8
  app_file: app.py
9
  pinned: false
10
  license: cc-by-nc-sa-4.0
 
4
  colorFrom: blue
5
  colorTo: green
6
  sdk: gradio
7
+ sdk_version: 6.12.0
8
  app_file: app.py
9
  pinned: false
10
  license: cc-by-nc-sa-4.0
app.py CHANGED
@@ -1,25 +1,33 @@
1
- import gradio as gr
2
- import pandas as pd
3
- import numpy as np
4
- import joblib
5
  import os
6
- from huggingface_hub import snapshot_download
 
7
  import tempfile
 
 
 
 
 
 
8
 
9
- # -------------------------------------------------
10
- # Configuration
11
- # -------------------------------------------------
12
- HF_REPO_ID = "uralstech/AIDE-Chip-Surrogates"
13
- MODEL_DIR = "surrogate_models_v2"
14
 
15
- WORKLOAD_ALIAS = {
16
- "matrix": "matrix_mul",
17
- "matmul": "matrix_mul",
18
  }
19
 
20
- TARGETS = ["ipc", "l2_miss_rate"]
21
 
22
- FEATURE_COLS = [
 
 
 
 
 
 
23
  "l1d_size_log2",
24
  "l1i_size_log2",
25
  "l2_size_log2",
@@ -28,98 +36,43 @@ FEATURE_COLS = [
28
  "l2_assoc_log2",
29
  "l2_l1d_ratio_log2",
30
  "l1d_sets_log2",
31
- "l2_sets_log2",
32
  ]
33
 
34
- REQUIRED_COLS = [
35
- "workload",
36
- "l1d_size",
37
- "l1i_size",
38
- "l2_size",
39
- "l1d_assoc",
40
- "l1i_assoc",
41
- "l2_assoc",
42
- ]
43
 
44
- # -------------------------------------------------
45
- # Global model cache
46
- # -------------------------------------------------
47
- MODEL_CACHE = {}
48
-
49
- # -------------------------------------------------
50
- # Model Download
51
- # -------------------------------------------------
52
- def ensure_models():
53
- if not os.path.exists(MODEL_DIR):
54
- snapshot_download(
55
- repo_id=HF_REPO_ID,
56
- local_dir=".",
57
- allow_patterns="*.pkl",
58
- )
59
 
60
- # -------------------------------------------------
61
- # Utilities
62
- # -------------------------------------------------
63
- def resolve_workload(workload: str) -> str:
64
- return WORKLOAD_ALIAS.get(workload, workload)
65
-
66
- def load_model(workload: str, target: str):
67
- try:
68
- return MODEL_CACHE[(workload, target)]
69
- except KeyError:
70
- raise RuntimeError(f"Model not preloaded: {workload}, {target}")
71
-
72
- def physical_sanity_check(ipc, miss_rate):
73
- out = []
74
- if ipc < 0 or ipc > 3.5:
75
- out.append(f"IPC={ipc:.3f} out of physical range")
76
- if miss_rate < 0 or miss_rate > 1:
77
- out.append(f"L2 miss rate={miss_rate:.3f} out of [0,1]")
78
- return out
79
-
80
- # -------------------------------------------------
81
- # Preload models (runs once at app start)
82
- # -------------------------------------------------
83
- def preload_models():
84
- ensure_models()
85
-
86
- workloads = {
87
- "crc32",
88
- "dijkstra",
89
- "fft",
90
- "matrix_mul",
91
- "qsort",
92
- "sha",
93
- }
94
-
95
- for workload in workloads:
96
- for target in TARGETS:
97
- path = os.path.join(
98
- MODEL_DIR, f"model_{workload}_{target}.pkl"
99
- )
100
- payload = joblib.load(path)
101
- MODEL_CACHE[(workload, target)] = (
102
- payload["model"],
103
- payload["log_target"],
104
- )
105
-
106
- return "ready"
107
-
108
- # -------------------------------------------------
109
- # Inference Core
110
- # -------------------------------------------------
111
- def run_inference(df: pd.DataFrame) -> pd.DataFrame:
112
- missing = set(REQUIRED_COLS) - set(df.columns)
113
- if missing:
114
- raise ValueError(f"Missing required columns: {missing}")
115
 
116
  for col in [
117
- "l1d_size",
118
- "l1i_size",
119
- "l2_size",
120
- "l1d_assoc",
121
- "l1i_assoc",
122
- "l2_assoc",
123
  ]:
124
  df[f"{col}_log2"] = np.log2(df[col])
125
 
@@ -127,126 +80,191 @@ def run_inference(df: pd.DataFrame) -> pd.DataFrame:
127
  df["l1d_sets_log2"] = df["l1d_size_log2"] - df["l1d_assoc_log2"]
128
  df["l2_sets_log2"] = df["l2_size_log2"] - df["l2_assoc_log2"]
129
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
  df["pred_ipc"] = np.nan
131
  df["pred_l2_miss_rate"] = np.nan
132
- df["warnings"] = ""
133
 
134
- for idx, row in df.iterrows():
135
- workload = resolve_workload(row["workload"])
136
- X = row[FEATURE_COLS].values.reshape(1, -1)
137
 
138
- preds = {}
139
- warn_msgs = []
 
140
 
141
- for target in TARGETS:
142
- model, is_log = load_model(workload, target)
143
- pred_raw = model.predict(X)[0]
144
- pred = np.expm1(pred_raw) if is_log else pred_raw
145
- if target == "l2_miss_rate":
146
- pred = np.clip(pred, 0, 1)
147
- preds[target] = float(pred)
148
 
149
- warn_msgs.extend(
150
- physical_sanity_check(preds["ipc"], preds["l2_miss_rate"])
151
- )
152
 
153
- df.at[idx, "pred_ipc"] = preds["ipc"]
154
- df.at[idx, "pred_l2_miss_rate"] = preds["l2_miss_rate"]
155
- df.at[idx, "warnings"] = "; ".join(warn_msgs)
156
 
157
- return df
 
158
 
159
- # -------------------------------------------------
160
- # Gradio Wrapper
161
- # -------------------------------------------------
162
- def infer_from_csv(file):
163
- df = pd.read_csv(file.name)
164
- out_df = run_inference(df)
165
 
166
- tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".csv")
167
- out_df.to_csv(tmp.name, index=False)
168
 
169
- warning_rows = out_df[out_df["warnings"] != ""]
170
- warning_text = (
171
- f"{len(warning_rows)} rows triggered sanity warnings."
172
- if not warning_rows.empty
173
- else "No sanity warnings detected."
174
- )
175
 
176
- return out_df.head(20), tmp.name, warning_text
 
177
 
178
- # -------------------------------------------------
179
- # UI
180
- # -------------------------------------------------
181
- with gr.Blocks(title="AIDE Chip Surrogate Inference") as demo:
182
- # Loading screen ONLY
183
- loading_md = gr.Markdown(
184
- "## Downloading surrogate models…\n\nThis may take a while.",
185
- visible=True,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
186
  )
187
 
188
- # Main app (hidden initially)
189
- with gr.Column(visible=False) as app_ui:
190
- gr.Markdown(
191
- """
192
- # AIDE Chip Surrogate Inference
193
-
194
- Upload a CSV describing cache configurations and workloads.
195
- The app will run surrogate models to predict:
196
- - IPC
197
- - L2 Miss Rate
198
-
199
- ## Expected CSV Format
200
-
201
- The input CSV **must** contain the following columns:
202
-
203
- **Required columns**
204
- - `workload` — one of: `crc32`, `dijkstra`, `fft`, `matrix_mul`, `qsort`, `sha`
205
- - `l1d_size` — L1 data cache size (kibibytes, power of two)
206
- - `l1i_size` — L1 instruction cache size (kibibytes, power of two)
207
- - `l2_size` — L2 cache size (kibibytes, power of two)
208
- - `l1d_assoc` — L1D associativity (power of two)
209
- - `l1i_assoc` — L1I associativity (power of two)
210
- - `l2_assoc` L2 associativity (power of two)
211
-
212
- **Notes**
213
- - All size and associativity values must be positive and powers of two.
214
- - One row corresponds to one cache configuration.
215
-
216
- **Example**
217
- ```
218
- workload,l1d_size,l1i_size,l2_size,l1d_assoc,l1i_assoc,l2_assoc
219
- matrix_mul,128,64,1024,16,8,16
220
- fft,128,64,2048,16,8,32
221
- ```
222
- """
223
  )
224
 
225
- csv_input = gr.File(label="Input CSV", file_types=[".csv"])
226
- run_btn = gr.Button("Run Inference")
227
 
228
- preview = gr.Dataframe(label="Preview (first 20 rows)")
229
- output_csv = gr.File(label="Download Full Output CSV")
230
- warnings_box = gr.Textbox(label="Sanity Check Summary")
231
 
232
- run_btn.click(
233
- infer_from_csv,
234
- inputs=csv_input,
235
- outputs=[preview, output_csv, warnings_box],
236
- )
 
 
 
 
 
 
 
 
237
 
238
- # Startup load hook
239
- demo.load(
240
- preload_models,
241
- inputs=None,
242
- outputs=None,
243
- ).then(
244
- lambda: (
245
- gr.update(visible=False),
246
- gr.update(visible=True),
247
- ),
248
- outputs=[loading_md, app_ui],
249
  )
250
 
251
- if __name__ == "__main__":
252
- demo.launch()
 
 
 
 
 
1
  import os
2
+ import json
3
+ import time
4
  import tempfile
5
+ import numpy as np
6
+ import pandas as pd
7
+ import xgboost as xgb
8
+ import shap
9
+ import matplotlib.pyplot as plt
10
+ import gradio as gr
11
 
12
+ # =========================================================
13
+ # CONFIG
14
+ # =========================================================
15
+ TOP_K = 20
16
+ MODEL_DIR = "surrogate_models_v2_json"
17
 
18
+ WORKLOADS = {
19
+ "crc32", "dijkstra", "fft", "qsort", "sha", "matrix_mul"
 
20
  }
21
 
22
+ TARGETS = {"ipc", "l2_miss_rate"}
23
 
24
+ REQUIRED_INPUT_COLUMNS = {
25
+ "l1d_size", "l1i_size", "l2_size",
26
+ "l1d_assoc", "l1i_assoc", "l2_assoc",
27
+ "workload"
28
+ }
29
+
30
+ FEATURES = [
31
  "l1d_size_log2",
32
  "l1i_size_log2",
33
  "l2_size_log2",
 
36
  "l2_assoc_log2",
37
  "l2_l1d_ratio_log2",
38
  "l1d_sets_log2",
39
+ "l2_sets_log2"
40
  ]
41
 
42
+ # =========================================================
43
+ # LOAD MODELS
44
+ # =========================================================
45
+ def load_models():
46
+ models = {}
47
+ explainers = {}
 
 
 
48
 
49
+ for w in WORKLOADS:
50
+ for t in TARGETS:
51
+ name = f"model_{w}_{t}"
 
 
 
 
 
 
 
 
 
 
 
 
52
 
53
+ model = xgb.XGBRegressor()
54
+ model.load_model(os.path.join(MODEL_DIR, f"{name}.json"))
55
+
56
+ with open(os.path.join(MODEL_DIR, f"{name}_meta.json")) as f:
57
+ meta = json.load(f)
58
+
59
+ models[name] = (model, meta["log_target"])
60
+ explainers[name] = shap.TreeExplainer(model)
61
+
62
+ return models, explainers
63
+
64
+
65
+ MODELS, EXPLAINERS = load_models()
66
+
67
+ # =========================================================
68
+ # FEATURE ENGINEERING
69
+ # =========================================================
70
+ def engineer_features(df):
71
+ df = df.copy()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
  for col in [
74
+ "l1d_size", "l1i_size", "l2_size",
75
+ "l1d_assoc", "l1i_assoc", "l2_assoc"
 
 
 
 
76
  ]:
77
  df[f"{col}_log2"] = np.log2(df[col])
78
 
 
80
  df["l1d_sets_log2"] = df["l1d_size_log2"] - df["l1d_assoc_log2"]
81
  df["l2_sets_log2"] = df["l2_size_log2"] - df["l2_assoc_log2"]
82
 
83
+ return df
84
+
85
+
86
+ # =========================================================
87
+ # MAIN INFERENCE
88
+ # =========================================================
89
+ def run_inference(file):
90
+ df = pd.read_csv(file.name)
91
+
92
+ missing = REQUIRED_INPUT_COLUMNS - set(df.columns)
93
+ if missing:
94
+ raise gr.Error(f"Missing required columns: {missing}")
95
+
96
+ start = time.perf_counter()
97
+
98
+ df = engineer_features(df)
99
+
100
  df["pred_ipc"] = np.nan
101
  df["pred_l2_miss_rate"] = np.nan
102
+ df["was_clipped"] = False
103
 
104
+ shap_summary_data = []
 
 
105
 
106
+ for workload, idx in df.groupby("workload").groups.items():
107
+ if workload not in WORKLOADS:
108
+ raise gr.Error(f"Unknown workload: {workload}")
109
 
110
+ X = df.loc[idx, FEATURES]
 
 
 
 
 
 
111
 
112
+ model_ipc, log_ipc = MODELS[f"model_{workload}_ipc"]
113
+ model_miss, log_miss = MODELS[f"model_{workload}_l2_miss_rate"]
 
114
 
115
+ raw_ipc = model_ipc.predict(X)
116
+ ipc = np.expm1(raw_ipc) if log_ipc else raw_ipc
 
117
 
118
+ raw_miss = model_miss.predict(X)
119
+ miss = np.expm1(raw_miss) if log_miss else raw_miss
120
 
121
+ clipped = (ipc > 3.5) | (ipc < 0)
 
 
 
 
 
122
 
123
+ ipc = np.clip(ipc, 0, 3.5)
124
+ miss = np.clip(miss, 0, 1)
125
 
126
+ df.loc[idx, "pred_ipc"] = ipc
127
+ df.loc[idx, "pred_l2_miss_rate"] = miss
128
+ df.loc[idx, "was_clipped"] = clipped
 
 
 
129
 
130
+ shap_values = EXPLAINERS[f"model_{workload}_ipc"].shap_values(X)
131
+ shap_summary_data.append((X, shap_values))
132
 
133
+ inference_time = time.perf_counter() - start
134
+
135
+ # Ranking
136
+ df["score"] = df["pred_ipc"] / (1 + df["pred_l2_miss_rate"])
137
+
138
+ group_cols = [
139
+ "l1d_size", "l1i_size", "l2_size",
140
+ "l1d_assoc", "l1i_assoc", "l2_assoc"
141
+ ]
142
+
143
+ agg = df.groupby(group_cols).agg({
144
+ "pred_ipc": "mean",
145
+ "pred_l2_miss_rate": "mean"
146
+ }).reset_index()
147
+
148
+ ipc_std = df.groupby(group_cols)["pred_ipc"].std().reset_index()
149
+ ipc_std.rename(columns={"pred_ipc": "ipc_std"}, inplace=True)
150
+
151
+ agg = agg.merge(ipc_std, on=group_cols)
152
+
153
+ agg["score"] = agg["pred_ipc"] / (1 + agg["pred_l2_miss_rate"])
154
+ agg["robust_score"] = agg["score"] / (1 + agg["ipc_std"])
155
+
156
+ agg = agg.sort_values("robust_score", ascending=False)
157
+
158
+ topk = agg.head(TOP_K)
159
+
160
+ # Save CSVs
161
+ ranked_path = tempfile.NamedTemporaryFile(delete=False, suffix=".csv").name
162
+ topk_path = tempfile.NamedTemporaryFile(delete=False, suffix=".csv").name
163
+
164
+ agg.to_csv(ranked_path, index=False)
165
+ topk.to_csv(topk_path, index=False)
166
+
167
+ # Plot topk robust score
168
+ fig_rank, ax = plt.subplots(figsize=(10, 5))
169
+ ax.bar(range(len(topk)), topk["robust_score"])
170
+ ax.set_title("Top-K Robust Scores")
171
+ ax.set_xlabel("Rank")
172
+ ax.set_ylabel("Robust Score")
173
+ plt.tight_layout()
174
+
175
+ # SHAP Global Summary
176
+ X_all = pd.concat([x for x, _ in shap_summary_data], axis=0)
177
+ shap_all = np.vstack([s for _, s in shap_summary_data])
178
+
179
+ fig_shap = plt.figure(figsize=(10, 6))
180
+ shap.summary_plot(
181
+ shap_all,
182
+ X_all,
183
+ feature_names=FEATURES,
184
+ show=False
185
+ )
186
+ plt.tight_layout()
187
+
188
+ summary = f"""
189
+ ### Inference Complete
190
+
191
+ - **Inference Time:** {inference_time:.3f}s
192
+ - **Total Pairs:** {len(df):,}
193
+ - **Unique Configs:** {len(agg):,}
194
+ - **OOD / Clipped:** {df['was_clipped'].sum():,}
195
+ """
196
+
197
+ return (
198
+ summary,
199
+ topk,
200
+ fig_rank,
201
+ fig_shap,
202
+ ranked_path,
203
+ topk_path
204
  )
205
 
206
+
207
+ # =========================================================
208
+ # UI
209
+ # =========================================================
210
+ custom_css = """
211
+ .gradio-container {
212
+ max-width: 1400px !important;
213
+ margin: auto;
214
+ }
215
+ footer {display:none !important;}
216
+ """
217
+
218
+ with gr.Blocks(
219
+ theme=gr.themes.Soft(
220
+ primary_hue="blue",
221
+ secondary_hue="indigo"
222
+ ),
223
+ css=custom_css,
224
+ title="AIDE-Chip Cache Ranker"
225
+ ) as demo:
226
+
227
+ gr.Markdown("""
228
+ # 🚀 AIDE-Chip Cache Configuration Ranker
229
+ ### Surrogate Model Inference + SHAP Explainability
230
+ Upload workload/config CSV to rank optimal cache configurations.
231
+ """)
232
+
233
+ with gr.Row():
234
+ file_input = gr.File(
235
+ label="Upload Input CSV",
236
+ file_types=[".csv"]
 
 
 
 
237
  )
238
 
239
+ run_btn = gr.Button("Run Inference", variant="primary", size="lg")
 
240
 
241
+ summary_md = gr.Markdown()
 
 
242
 
243
+ with gr.Tabs():
244
+ with gr.Tab("🏆 Top Configurations"):
245
+ topk_df = gr.Dataframe()
246
+
247
+ with gr.Tab("📈 Ranking Plot"):
248
+ rank_plot = gr.Plot()
249
+
250
+ with gr.Tab("🧠 SHAP Explainability"):
251
+ shap_plot = gr.Plot()
252
+
253
+ with gr.Tab("⬇ Downloads"):
254
+ ranked_csv = gr.File(label="Download Ranked CSV")
255
+ topk_csv = gr.File(label="Download Top-K CSV")
256
 
257
+ run_btn.click(
258
+ fn=run_inference,
259
+ inputs=file_input,
260
+ outputs=[
261
+ summary_md,
262
+ topk_df,
263
+ rank_plot,
264
+ shap_plot,
265
+ ranked_csv,
266
+ topk_csv
267
+ ]
268
  )
269
 
270
+ demo.launch()
 
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
- pandas
2
  numpy
3
- joblib
4
  xgboost
 
 
1
+ shap
2
  numpy
3
+ pandas
4
  xgboost
5
+ matplotlib
surrogate_models_v2_json/model_crc32_ipc.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_crc32_ipc_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": false
14
+ }
surrogate_models_v2_json/model_crc32_l2_miss_rate.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_crc32_l2_miss_rate_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": true
14
+ }
surrogate_models_v2_json/model_dijkstra_ipc.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_dijkstra_ipc_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": false
14
+ }
surrogate_models_v2_json/model_dijkstra_l2_miss_rate.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_dijkstra_l2_miss_rate_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": true
14
+ }
surrogate_models_v2_json/model_fft_ipc.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_fft_ipc_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": false
14
+ }
surrogate_models_v2_json/model_fft_l2_miss_rate.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_fft_l2_miss_rate_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": true
14
+ }
surrogate_models_v2_json/model_matrix_mul_ipc.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_matrix_mul_ipc_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": false
14
+ }
surrogate_models_v2_json/model_matrix_mul_l2_miss_rate.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_matrix_mul_l2_miss_rate_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": true
14
+ }
surrogate_models_v2_json/model_qsort_ipc.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_qsort_ipc_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": false
14
+ }
surrogate_models_v2_json/model_qsort_l2_miss_rate.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_qsort_l2_miss_rate_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": true
14
+ }
surrogate_models_v2_json/model_sha_ipc.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_sha_ipc_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": false
14
+ }
surrogate_models_v2_json/model_sha_l2_miss_rate.json ADDED
The diff for this file is too large to render. See raw diff
 
surrogate_models_v2_json/model_sha_l2_miss_rate_meta.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "features": [
3
+ "l1d_size_log2",
4
+ "l1i_size_log2",
5
+ "l2_size_log2",
6
+ "l1d_assoc_log2",
7
+ "l1i_assoc_log2",
8
+ "l2_assoc_log2",
9
+ "l2_l1d_ratio_log2",
10
+ "l1d_sets_log2",
11
+ "l2_sets_log2"
12
+ ],
13
+ "log_target": true
14
+ }