
Commit b052f1b: Better prints

Author: bedogni@unimore.it

1 parent 8332d02

File tree

6 files changed: +151 -42 lines changed

server_client_light/client/http_client.py

Lines changed: 27 additions & 25 deletions
@@ -199,11 +199,8 @@ def register_device():
     payload = {"device_id": DEVICE_ID}
     try:
         r = requests.post(url, json=payload, timeout=5)
-        print("Registration:", r.status_code, r.text)
         return True
     except requests.exceptions.RequestException as e:
-        print(f"WARNING: Registration failed (server unreachable): {e}")
-        print(" → Continuing with local-only inference")
         return False

 # ------------------------------------------------
@@ -228,10 +225,8 @@ def send_image():

     try:
         r = requests.post(url, data=buffer, headers={"Content-Type": "application/octet-stream"}, timeout=5)
-        print("Image sent:", r.status_code)
         return True
     except requests.exceptions.RequestException as e:
-        print(f"WARNING: Image send failed (server unreachable): {e}")
         return False

 # ----------------------------
@@ -243,10 +238,8 @@ def get_offloading_layer():
         r = requests.get(url, timeout=5)
         if r.status_code == 200:
             best_layer = r.json().get("offloading_layer_index", LAST_OFFLOADING_LAYER)
-            print("Best layer received:", best_layer)
             return best_layer
         else:
-            print("Error requesting layer:", r.status_code)
             return LAST_OFFLOADING_LAYER
     except requests.exceptions.RequestException as e:
         print(f"WARNING: Cannot reach server: {e}")
@@ -283,7 +276,6 @@ def run_split_inference(image, tflite_dir, stop_layer):
     # Handle -1 as "run all layers until the end"
     if stop_layer == -1:
         stop_layer = LAST_OFFLOADING_LAYER
-        print(f"Offloading layer -1: Running all {stop_layer + 1} layers locally")

     for i in range(stop_layer + 1):
         model_path = str(tflite_dir / f"{SUBMODEL_PREFIX}_{i}.tflite")
@@ -300,23 +292,18 @@ def run_split_inference(image, tflite_dir, stop_layer):
         # Apply artificial computation delay
         if computation_delay.enabled:
             delay = computation_delay.apply_delay()
-            print(f" Layer {i} computation delay: {delay*1000:.2f}ms")

         interpreter.invoke()
         t1 = time.time()
         inference_times.append(t1 - t0)
-        # Increase inference time with random time -> simulate a slower client
-        #t = ((t1-t0) + random.uniform(0,0.02))
-        #inference_times.append(t)
         input_data = interpreter.get_tensor(output_details[0]['index'])
-        print(f"Layer {i} OK → output shape: {input_data.shape}")
     return input_data, inference_times

-def send_inference_result(output_data, inference_times, layer_index, message_id):
+def send_inference_result(output_data, inference_times, layer_index, message_id, acq_time_ms):
     # Apply network delay before sending
+    send_start = time.time()
     if network_delay.enabled:
         delay = network_delay.apply_delay()
-        print(f" Applied network delay: {delay*1000:.2f}ms")

     url = f"{SERVER}{ENDPOINTS['device_inference_result']}"
     timestamp = time.time()
@@ -326,19 +313,21 @@ def send_inference_result(output_data, inference_times, layer_index, message_id)
     buffer += DEVICE_ID.encode("ascii").ljust(9, b'\x00')
     buffer += message_id.encode("ascii").ljust(4, b'\x00')
     buffer += struct.pack("i", layer_index)
+    buffer += struct.pack("f", acq_time_ms / 1000.0) # Send acquisition time in seconds
     buffer += struct.pack("I", output_data.nbytes)
     buffer += output_data.tobytes()
     buffer += struct.pack("i", len(inference_times) * 4)
     buffer += np.array(inference_times, dtype=np.float32).tobytes()

     try:
         r = requests.post(url, data=buffer, headers={"Content-Type": "application/octet-stream"}, timeout=5)
-        print("Output sent:", r.status_code)
-        return True
+        send_end = time.time()
+        network_time = send_end - send_start
+        data_size = len(buffer)
+        network_speed = (data_size / network_time) / 1024 if network_time > 0 else 0 # KB/s
+        return True, network_time, network_speed, data_size
     except requests.exceptions.RequestException as e:
-        print(f"WARNING: Cannot send result to server: {e}")
-        print(" → Local inference completed, result not synchronized")
-        return False
+        return False, 0, 0, 0

 # -----
 # MAIN
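
Note (not part of the diff): the new network_speed figure times the whole requests.post round trip, so it measures effective end-to-end throughput, including server processing, rather than raw link speed. A minimal sketch of the computation, with hypothetical numbers:

    def estimate_speed_kbs(data_size: int, network_time: float) -> float:
        """KB/s over the measured send window, guarding against a zero interval."""
        return (data_size / network_time) / 1024 if network_time > 0 else 0.0

    print(estimate_speed_kbs(150 * 1024, 0.25))  # hypothetical: 150 KiB in 0.25 s -> 600.0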
@@ -353,8 +342,12 @@ def main():
     # Try to register, but continue even if it fails
     server_available = register_device()
     if not server_available:
-        print("\nWARNING: Server not available - Running in LOCAL-ONLY mode")
-        print(" Client will continue and retry server connection on each request\n")
+        print("WARNING: Server not available - Running in LOCAL-ONLY mode\n")
+    else:
+        print("Connected to server\n")
+
+    print("Offload | Acq Time (ms) | Comp Time (ms) | Net Time (ms) | Net Speed (KB/s) | Data (bytes)")
+    print("-" * 90)

     while True:
         # Try to send image (optional, just for server tracking)
@@ -363,17 +356,26 @@ def main():
         # Get offloading decision (fallback to local if server unreachable)
         best_layer = get_offloading_layer()

-        time.sleep(1) # Ensure server has processed the request
         message_id = generate_message_id()
+
+        # Measure image acquisition time
+        acq_start = time.time()
         image = load_image_rgb(IMAGE_PATH)
+        acq_end = time.time()
+        acq_time = (acq_end - acq_start) * 1000 # Convert to ms

         # Always run inference (local or split based on best_layer)
         output_data, inference_times = run_split_inference(image, TFLITE_DIR, best_layer)
+        comp_time = sum(inference_times) * 1000 # Convert to ms

         # Try to send results (for variance tracking and algorithm updates)
-        send_inference_result(output_data, inference_times, best_layer, message_id)
+        success, net_time, net_speed, data_size = send_inference_result(output_data, inference_times, best_layer, message_id, acq_time)

-        print(f"Inference complete (layers 0-{best_layer})\n")
+        # Print single line summary
+        if success:
+            print(f"{best_layer:7d} | {acq_time:14.2f} | {comp_time:14.2f} | {net_time*1000:12.2f} | {net_speed:16.2f} | {data_size:12d}")
+        else:
+            print(f"{best_layer:7d} | {acq_time:14.2f} | {comp_time:14.2f} | {'N/A':>12} | {'N/A':>16} | {'N/A':>12}")

 main()
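Example (not part of the diff): the summary rows rely on format-spec widths so each value lines up under the header printed at startup. A standalone sketch with hypothetical values:

    best_layer, acq_time, comp_time = 12, 3.41, 184.27
    net_time, net_speed, data_size = 0.042, 612.50, 150528
    print(f"{best_layer:7d} | {acq_time:14.2f} | {comp_time:14.2f} | "
          f"{net_time*1000:12.2f} | {net_speed:16.2f} | {data_size:12d}")
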
src/server/communication/http_server.py

Lines changed: 28 additions & 1 deletion
@@ -6,6 +6,8 @@
 import time
 import traceback
 import sys
+import yaml
+from pathlib import Path

 class HttpServer:
     def __init__(
@@ -25,6 +27,9 @@ def __init__(
         self.endpoints = endpoints

         self.devices = set()
+
+        # Load verbose setting
+        self.verbose = self._load_verbose_config()

         # Set up model
         self.input_height = input_height
@@ -122,10 +127,32 @@ async def close_simulation_csv():
                 raise HTTPException(status_code=500, detail=str(e))
             raise HTTPException(status_code=500, detail=str(e))

+    def _load_verbose_config(self):
+        """Load verbose configuration from settings.yaml"""
+        try:
+            settings_path = Path(__file__).parent.parent / "settings.yaml"
+            with open(settings_path, 'r') as f:
+                settings = yaml.safe_load(f)
+                return settings.get('verbose', False)
+        except Exception:
+            return False
+
     def run(self):
         import uvicorn
+        import logging
+
+        # Configure uvicorn logging based on verbose setting
+        if not self.verbose:
+            # Suppress all uvicorn and FastAPI logging
+            logging.getLogger("uvicorn").setLevel(logging.CRITICAL)
+            logging.getLogger("uvicorn.access").setLevel(logging.CRITICAL)
+            logging.getLogger("uvicorn.error").setLevel(logging.CRITICAL)
+            logging.getLogger("fastapi").setLevel(logging.CRITICAL)
+
         uvicorn.run(
             self.app,
             host=self.host,
-            port=self.port
+            port=self.port,
+            log_level="critical" if not self.verbose else "info",
+            access_log=self.verbose
         )
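
Example (not part of the diff): the verbose gate silences uvicorn at three levels, through the named loggers, uvicorn's own log_level, and the access log. A minimal standalone sketch of the same pattern; the app, host, and port here are placeholders:

    import logging

    import uvicorn
    from fastapi import FastAPI

    app = FastAPI()
    verbose = False  # would come from settings.yaml

    if not verbose:
        for name in ("uvicorn", "uvicorn.access", "uvicorn.error", "fastapi"):
            logging.getLogger(name).setLevel(logging.CRITICAL)

    uvicorn.run(app, host="127.0.0.1", port=8000,
                log_level="info" if verbose else "critical",
                access_log=verbose)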

src/server/communication/request_handler.py

Lines changed: 57 additions & 14 deletions
@@ -48,26 +48,47 @@ def load_local_inference_config():
         return {'enabled': False, 'probability': 0.0}


+def load_verbose_config():
+    """Load verbose configuration from settings.yaml"""
+    settings_path = Path(__file__).parent.parent / "settings.yaml"
+    try:
+        with open(settings_path, 'r') as f:
+            settings = yaml.safe_load(f)
+            return settings.get('verbose', False)
+    except Exception as e:
+        return False
+
+
 class RequestHandler():
     # Class-level variance detector (shared across all requests)
     variance_detector = VarianceDetector(window_size=10, variance_threshold=0.15)
     # Class-level CSV file tracking for simulation results
     csv_file = None
     csv_writer = None
     inference_counter = 0
+    header_printed = False

     def __init__(self):
+        # Load verbose configuration
+        self.verbose = load_verbose_config()
+
+        # Print header once
+        if not RequestHandler.header_printed:
+            print("\nDevice | Offload | Acq Time (ms) | Device Comp (ms) | Edge Comp (ms) | Net Time (ms) | Total (ms)")
+            print("-" * 100)
+            RequestHandler.header_printed = True
+
         # Load network delay configuration
         network_delay_config = load_network_delay_config()
         self.network_delay = DelaySimulator(network_delay_config)
-        if self.network_delay.enabled:
+        if self.network_delay.enabled and self.verbose:
            logger.info(f"Network delay simulation enabled: {self.network_delay.get_delay_info()}")

         # Load local inference mode configuration
         local_config = load_local_inference_config()
         self.local_inference_enabled = local_config.get('enabled', False)
         self.local_inference_probability = local_config.get('probability', 0.0)
-        if self.local_inference_enabled:
+        if self.local_inference_enabled and self.verbose:
             logger.info(f"Local inference mode enabled with probability {self.local_inference_probability:.0%}")

         # Initialize network speed tracking
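
Example (not part of the diff): load_verbose_config() reads a settings.yaml one directory above the module, and only the 'verbose' key is shown being used, so the file contents below are an assumption:

    import yaml

    # Hypothetical settings.yaml contents; quiet mode is the fallback
    settings = yaml.safe_load("verbose: false")
    assert settings.get('verbose', False) is False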
@@ -116,7 +137,7 @@ def should_force_local_inference(self) -> bool:
         # Randomly decide based on probability
         should_force = random.random() < self.local_inference_probability

-        if should_force:
+        if should_force and self.verbose:
             logger.info("Forcing local-only inference to refresh device times")

         return should_force
@@ -125,14 +146,16 @@ def handle_registration(self, device_id):
         # Apply network delay before responding
         if self.network_delay.enabled:
             delay = self.network_delay.apply_delay()
-            logger.debug(f"Applied network delay: {delay*1000:.2f}ms")
+            if self.verbose:
+                logger.debug(f"Applied network delay: {delay*1000:.2f}ms")
         return device_id

     def handle_device_input(self, rgb565_image, height, width):
         # Apply network delay after receiving input
         if self.network_delay.enabled:
             delay = self.network_delay.apply_delay()
-            logger.debug(f"Applied network delay: {delay*1000:.2f}ms")
+            if self.verbose:
+                logger.debug(f"Applied network delay: {delay*1000:.2f}ms")

         # Extract timestamp if present (first 8 bytes)
         import struct
@@ -147,7 +170,8 @@ def handle_device_input(self, rgb565_image, height, width):
             if latency > 0:
                 # Store in class variable for use in offloading decision
                 self.last_avg_speed = payload_size / latency
-                logger.info(f"Network speed calculated: {self.last_avg_speed:.2f} bytes/sec (latency: {latency*1000:.2f}ms)")
+                if self.verbose:
+                    logger.info(f"Network speed calculated: {self.last_avg_speed:.2f} bytes/sec (latency: {latency*1000:.2f}ms)")

         # Remove timestamp from image data
         rgb565_image = rgb565_image[8:]
@@ -190,12 +214,14 @@ def handle_device_inference_result(self, body, received_timestamp):
         if message_data.offloading_layer_index == -1 or message_data.offloading_layer_index >= 58:
             # All layers completed on device, no edge inference needed
             prediction = np.array(message_data.layer_output, dtype=np.float32)
-            logger.debug(f"All layers completed on device (layer_index={message_data.offloading_layer_index})")
+            if self.verbose:
+                logger.debug(f"All layers completed on device (layer_index={message_data.offloading_layer_index})")
         else:
             # Continue inference on edge from where device stopped
             prediction, edge_layer_times = Edge.run_inference(message_data.offloading_layer_index, np.array(message_data.layer_output, dtype=np.float32))
             num_edge_layers = len(edge_layer_times)
-            logger.debug(f"Edge processed {num_edge_layers} layers with times: {edge_layer_times}")
+            if self.verbose:
+                logger.debug(f"Edge processed {num_edge_layers} layers with times: {edge_layer_times}")

         # Update edge inference times with EMA (same alpha as device for faster adaptation)
         with open(OffloadingDataFiles.data_file_path_edge, 'r') as f:
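
Note (not part of the diff): the context line above refers to an EMA update of edge inference times with "the same alpha as device"; the rule itself is outside this hunk. A generic sketch, with an assumed alpha:

    def ema_update(old: float, new: float, alpha: float = 0.3) -> float:
        """Exponential moving average: weight alpha on the newest sample."""
        return alpha * new + (1 - alpha) * old

    smoothed = ema_update(old=0.012, new=0.015)  # hypothetical per-layer times (s)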
@@ -238,29 +264,44 @@ def handle_device_inference_result(self, body, received_timestamp):

             RequestHandler.csv_writer.writerow(row)
             RequestHandler.csv_file.flush()
-            logger.debug(f"Recorded inference {RequestHandler.inference_counter} to CSV")
+            if self.verbose:
+                logger.debug(f"Recorded inference {RequestHandler.inference_counter} to CSV")

-        logger.debug(f"Prediction: {prediction.tolist()}")
+        if self.verbose:
+            logger.debug(f"Prediction: {prediction.tolist()}")
         MessageData.save_to_file(EvaluationFiles.evaluation_file_path, message_data.to_dict())

+        # Print clean one-line summary
+        device_id = message_data.device_id
+        offload_layer = message_data.offloading_layer_index
+        acq_time = message_data.message_content.get("acquisition_time", 0) * 1000 # Convert to ms
+        device_comp_time = sum(message_data.device_layers_inference_time) * 1000 # Convert to ms
+        edge_comp_time = sum(edge_layer_times) * 1000 if edge_layer_times else 0 # Convert to ms
+        network_time = (received_timestamp - message_data.timestamp) * 1000 # Convert to ms
+        total_time = acq_time + device_comp_time + edge_comp_time + network_time
+        print(f"{device_id:9s} | {offload_layer:7d} | {acq_time:13.2f} | {device_comp_time:16.2f} | {edge_comp_time:14.2f} | {network_time:13.2f} | {total_time:10.2f}")
+
         # Apply network delay before responding
         if self.network_delay.enabled:
             delay = self.network_delay.apply_delay()
-            logger.debug(f"Applied network delay before response: {delay*1000:.2f}ms")
+            if self.verbose:
+                logger.debug(f"Applied network delay before response: {delay*1000:.2f}ms")

         # run offloading algorithm
         device_inference_times, edge_inference_times, layers_sizes = RequestHandler._load_stats()
-        logger.debug(f"Loaded stats data")
+        if self.verbose:
+            logger.debug(f"Loaded stats data")

         # Check if variance detected - potentially need to re-test offloading
-        if RequestHandler.variance_detector.should_retest_offloading():
+        if RequestHandler.variance_detector.should_retest_offloading() and self.verbose:
             logger.warning("Offloading algorithm may need re-evaluation due to inference time variance")

         # Check if we should force local-only inference for refreshing times
         if self.should_force_local_inference():
             # Force all layers on device (offloading layer = -1 means no offloading)
             best_offloading_layer = -1
-            logger.info("Forcing all layers on device for time refresh (local inference mode)")
+            if self.verbose:
+                logger.info("Forcing all layers on device for time refresh (local inference mode)")
         else:
             offloading_algo = OffloadingAlgo(
                 avg_speed=self.last_avg_speed,
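
Note (not part of the diff): network_time subtracts a device-generated timestamp from the server's receive time, so the printed figure is only meaningful if the two clocks are reasonably synchronized. A compact sketch of the totals with hypothetical values:

    acq_ms = 0.0334 * 1000                              # acquisition_time from payload
    device_ms = sum([0.010, 0.012]) * 1000              # device_layers_inference_time
    edge_ms = sum([0.004]) * 1000                       # edge_layer_times
    net_ms = (1700000000.250 - 1700000000.200) * 1000   # received - device timestamp
    total_ms = acq_ms + device_ms + edge_ms + net_ms    # ~ 109.40 ms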
@@ -309,6 +350,8 @@ def _from_raw(topic: str, payload: bytes):
         offset += 4
         message_content["offloading_layer_index"] = struct.unpack('i', payload[offset:offset+4])[0]
         offset += 4
+        message_content["acquisition_time"] = struct.unpack('f', payload[offset:offset+4])[0]
+        offset += 4
         layer_output_size = struct.unpack('I', payload[offset:offset+4])[0]
         offset += 4
         message_content["layer_output"] = struct.unpack(f'<{int(layer_output_size/4)}f', payload[offset:offset+layer_output_size])
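
Example (not part of the diff): the new acquisition_time field must sit at the same byte offset on both ends, packed right after the layer index by the client and unpacked at the same position here. A self-contained round-trip sketch; the leading double timestamp is inferred from the surrounding code, and the sample values are hypothetical:

    import struct

    import numpy as np

    # Pack, mirroring send_inference_result on the client
    buf = struct.pack("d", 1700000000.0)           # timestamp (time.time())
    buf += b"dev-01".ljust(9, b"\x00")             # device_id, 9 bytes
    buf += b"a1b2".ljust(4, b"\x00")               # message_id, 4 bytes
    buf += struct.pack("i", 12)                    # offloading layer index
    buf += struct.pack("f", 33.4 / 1000.0)         # acquisition time in seconds
    output = np.zeros(8, dtype=np.float32)         # hypothetical layer output
    buf += struct.pack("I", output.nbytes) + output.tobytes()

    # Unpack, mirroring _from_raw on the server
    offset = 8 + 9 + 4                             # skip timestamp and both IDs
    layer_index = struct.unpack('i', buf[offset:offset+4])[0]; offset += 4
    acq_time = struct.unpack('f', buf[offset:offset+4])[0]; offset += 4
    size = struct.unpack('I', buf[offset:offset+4])[0]; offset += 4
    layer_output = struct.unpack(f'<{size // 4}f', buf[offset:offset+size])

    assert layer_index == 12 and len(layer_output) == 8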

src/server/edge/run_edge.py

Lines changed: 4 additions & 1 deletion
@@ -1,5 +1,5 @@
 from server.edge.edge_initialization import Edge
-from server.logger.log import logger
+from server.logger.log import logger, configure_logger_from_settings

 import yaml
 from server.communication.websocket_server import WebsocketServer
@@ -11,6 +11,9 @@
 from server.commons import ConfigurationFiles

 if __name__ == "__main__":
+    # Configure logger based on settings
+    configure_logger_from_settings()
+
     logger.info("Starting the [EDGE] MQTT client")

     with open(ConfigurationFiles.server_configuration_file_path, "r") as f:
