@@ -48,26 +48,47 @@ def load_local_inference_config():
4848 return {'enabled' : False , 'probability' : 0.0 }
4949
5050
def load_verbose_config():
    """Load the top-level ``verbose`` flag from settings.yaml.

    Returns:
        bool-ish: the value of the ``verbose`` key, or ``False`` when the
        settings file is missing, unreadable, malformed, empty, or the key
        is absent.
    """
    settings_path = Path(__file__).parent.parent / "settings.yaml"
    try:
        with open(settings_path, 'r') as f:
            settings = yaml.safe_load(f)
    except (OSError, yaml.YAMLError):
        # Best-effort config read: default to quiet mode on any
        # file-access or parse failure instead of crashing startup.
        return False
    # safe_load returns None for an empty file (and may return a scalar
    # for degenerate YAML); only a mapping has a 'verbose' key.
    if not isinstance(settings, dict):
        return False
    return settings.get('verbose', False)
60+
61+
5162class RequestHandler ():
5263 # Class-level variance detector (shared across all requests)
5364 variance_detector = VarianceDetector (window_size = 10 , variance_threshold = 0.15 )
5465 # Class-level CSV file tracking for simulation results
5566 csv_file = None
5667 csv_writer = None
5768 inference_counter = 0
69+ header_printed = False
5870
5971 def __init__ (self ):
72+ # Load verbose configuration
73+ self .verbose = load_verbose_config ()
74+
75+ # Print header once
76+ if not RequestHandler .header_printed :
77+ print ("\n Device | Offload | Acq Time (ms) | Device Comp (ms) | Edge Comp (ms) | Net Time (ms) | Total (ms)" )
78+ print ("-" * 100 )
79+ RequestHandler .header_printed = True
80+
6081 # Load network delay configuration
6182 network_delay_config = load_network_delay_config ()
6283 self .network_delay = DelaySimulator (network_delay_config )
63- if self .network_delay .enabled :
84+ if self .network_delay .enabled and self . verbose :
6485 logger .info (f"Network delay simulation enabled: { self .network_delay .get_delay_info ()} " )
6586
6687 # Load local inference mode configuration
6788 local_config = load_local_inference_config ()
6889 self .local_inference_enabled = local_config .get ('enabled' , False )
6990 self .local_inference_probability = local_config .get ('probability' , 0.0 )
70- if self .local_inference_enabled :
91+ if self .local_inference_enabled and self . verbose :
7192 logger .info (f"Local inference mode enabled with probability { self .local_inference_probability :.0%} " )
7293
7394 # Initialize network speed tracking
@@ -116,7 +137,7 @@ def should_force_local_inference(self) -> bool:
116137 # Randomly decide based on probability
117138 should_force = random .random () < self .local_inference_probability
118139
119- if should_force :
140+ if should_force and self . verbose :
120141 logger .info ("Forcing local-only inference to refresh device times" )
121142
122143 return should_force
@@ -125,14 +146,16 @@ def handle_registration(self, device_id):
125146 # Apply network delay before responding
126147 if self .network_delay .enabled :
127148 delay = self .network_delay .apply_delay ()
128- logger .debug (f"Applied network delay: { delay * 1000 :.2f} ms" )
149+ if self .verbose :
150+ logger .debug (f"Applied network delay: { delay * 1000 :.2f} ms" )
129151 return device_id
130152
131153 def handle_device_input (self , rgb565_image , height , width ):
132154 # Apply network delay after receiving input
133155 if self .network_delay .enabled :
134156 delay = self .network_delay .apply_delay ()
135- logger .debug (f"Applied network delay: { delay * 1000 :.2f} ms" )
157+ if self .verbose :
158+ logger .debug (f"Applied network delay: { delay * 1000 :.2f} ms" )
136159
137160 # Extract timestamp if present (first 8 bytes)
138161 import struct
@@ -147,7 +170,8 @@ def handle_device_input(self, rgb565_image, height, width):
147170 if latency > 0 :
148171 # Store in class variable for use in offloading decision
149172 self .last_avg_speed = payload_size / latency
150- logger .info (f"Network speed calculated: { self .last_avg_speed :.2f} bytes/sec (latency: { latency * 1000 :.2f} ms)" )
173+ if self .verbose :
174+ logger .info (f"Network speed calculated: { self .last_avg_speed :.2f} bytes/sec (latency: { latency * 1000 :.2f} ms)" )
151175
152176 # Remove timestamp from image data
153177 rgb565_image = rgb565_image [8 :]
@@ -190,12 +214,14 @@ def handle_device_inference_result(self, body, received_timestamp):
190214 if message_data .offloading_layer_index == - 1 or message_data .offloading_layer_index >= 58 :
191215 # All layers completed on device, no edge inference needed
192216 prediction = np .array (message_data .layer_output , dtype = np .float32 )
193- logger .debug (f"All layers completed on device (layer_index={ message_data .offloading_layer_index } )" )
217+ if self .verbose :
218+ logger .debug (f"All layers completed on device (layer_index={ message_data .offloading_layer_index } )" )
194219 else :
195220 # Continue inference on edge from where device stopped
196221 prediction , edge_layer_times = Edge .run_inference (message_data .offloading_layer_index , np .array (message_data .layer_output , dtype = np .float32 ))
197222 num_edge_layers = len (edge_layer_times )
198- logger .debug (f"Edge processed { num_edge_layers } layers with times: { edge_layer_times } " )
223+ if self .verbose :
224+ logger .debug (f"Edge processed { num_edge_layers } layers with times: { edge_layer_times } " )
199225
200226 # Update edge inference times with EMA (same alpha as device for faster adaptation)
201227 with open (OffloadingDataFiles .data_file_path_edge , 'r' ) as f :
@@ -238,29 +264,44 @@ def handle_device_inference_result(self, body, received_timestamp):
238264
239265 RequestHandler .csv_writer .writerow (row )
240266 RequestHandler .csv_file .flush ()
241- logger .debug (f"Recorded inference { RequestHandler .inference_counter } to CSV" )
267+ if self .verbose :
268+ logger .debug (f"Recorded inference { RequestHandler .inference_counter } to CSV" )
242269
243- logger .debug (f"Prediction: { prediction .tolist ()} " )
270+ if self .verbose :
271+ logger .debug (f"Prediction: { prediction .tolist ()} " )
244272 MessageData .save_to_file (EvaluationFiles .evaluation_file_path , message_data .to_dict ())
245273
274+ # Print clean one-line summary
275+ device_id = message_data .device_id
276+ offload_layer = message_data .offloading_layer_index
277+ acq_time = message_data .message_content .get ("acquisition_time" , 0 ) * 1000 # Convert to ms
278+ device_comp_time = sum (message_data .device_layers_inference_time ) * 1000 # Convert to ms
279+ edge_comp_time = sum (edge_layer_times ) * 1000 if edge_layer_times else 0 # Convert to ms
280+ network_time = (received_timestamp - message_data .timestamp ) * 1000 # Convert to ms
281+ total_time = acq_time + device_comp_time + edge_comp_time + network_time
282+ print (f"{ device_id :9s} | { offload_layer :7d} | { acq_time :13.2f} | { device_comp_time :16.2f} | { edge_comp_time :14.2f} | { network_time :13.2f} | { total_time :10.2f} " )
283+
246284 # Apply network delay before responding
247285 if self .network_delay .enabled :
248286 delay = self .network_delay .apply_delay ()
249- logger .debug (f"Applied network delay before response: { delay * 1000 :.2f} ms" )
287+ if self .verbose :
288+ logger .debug (f"Applied network delay before response: { delay * 1000 :.2f} ms" )
250289
251290 # run offloading algorithm
252291 device_inference_times , edge_inference_times , layers_sizes = RequestHandler ._load_stats ()
253- logger .debug (f"Loaded stats data" )
292+ if self .verbose :
293+ logger .debug (f"Loaded stats data" )
254294
255295 # Check if variance detected - potentially need to re-test offloading
256- if RequestHandler .variance_detector .should_retest_offloading ():
296+ if RequestHandler .variance_detector .should_retest_offloading () and self . verbose :
257297 logger .warning ("Offloading algorithm may need re-evaluation due to inference time variance" )
258298
259299 # Check if we should force local-only inference for refreshing times
260300 if self .should_force_local_inference ():
261301 # Force all layers on device (offloading layer = -1 means no offloading)
262302 best_offloading_layer = - 1
263- logger .info ("Forcing all layers on device for time refresh (local inference mode)" )
303+ if self .verbose :
304+ logger .info ("Forcing all layers on device for time refresh (local inference mode)" )
264305 else :
265306 offloading_algo = OffloadingAlgo (
266307 avg_speed = self .last_avg_speed ,
@@ -309,6 +350,8 @@ def _from_raw(topic: str, payload: bytes):
309350 offset += 4
310351 message_content ["offloading_layer_index" ] = struct .unpack ('i' , payload [offset :offset + 4 ])[0 ]
311352 offset += 4
353+ message_content ["acquisition_time" ] = struct .unpack ('f' , payload [offset :offset + 4 ])[0 ]
354+ offset += 4
312355 layer_output_size = struct .unpack ('I' , payload [offset :offset + 4 ])[0 ]
313356 offset += 4
314357 message_content ["layer_output" ] = struct .unpack (f'<{ int (layer_output_size / 4 )} f' , payload [offset :offset + layer_output_size ])
0 commit comments