#!/usr/bin/env python3
"""
=========================================================================
OPTIMIZED GPU CONFIGURATION FOR DDoS DETECTION SYSTEM v04
=========================================================================
TensorFlow GPU optimizations targeting a Tesla M60 on AlmaLinux 9.6
"""

import json
import os
from pathlib import Path

import tensorflow as tf


def configure_gpu_v04():
    """Configure the GPU for the DDoS Detection v04 system."""
    print("🎮 GPU CONFIGURATION FOR DDoS SYSTEM v04")
    print("=" * 50)

    # 1. CHECK GPU AVAILABILITY
    gpus = tf.config.list_physical_devices('GPU')
    print(f"📊 GPUs available: {len(gpus)}")

    if not gpus:
        print("❌ No GPU found - falling back to CPU mode")
        return False

    # 2. GPU MEMORY CONFIGURATION
    # Note: memory growth and a virtual-device memory limit are mutually
    # exclusive on the same physical GPU, and TensorFlow rejects the
    # combination at runtime. Memory growth is used here; to cap usage at
    # a fixed limit instead (e.g. 6 GB of the M60's 8 GB), replace the
    # loop below with:
    #   tf.config.set_logical_device_configuration(gpus[0],
    #       [tf.config.LogicalDeviceConfiguration(memory_limit=6144)])
    try:
        # Enable dynamic memory growth (important on a Tesla M60 with 8 GB)
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
            print(f"✅ Dynamic memory growth enabled for {gpu}")
    except RuntimeError as e:
        print(f"⚠️ Memory configuration error: {e}")

    # 3. TENSORFLOW OPTIMIZATIONS
    # Mixed precision: the Tesla M60 (compute capability 5.2) can store
    # FP16, but it has no Tensor Cores, so mixed_float16 mainly saves
    # memory here; real speedups require compute capability >= 7.0.
    policy = tf.keras.mixed_precision.Policy('mixed_float16')
    tf.keras.mixed_precision.set_global_policy(policy)
    print("✅ Mixed precision enabled (FP16)")

    # XLA optimizations
    tf.config.optimizer.set_jit(True)
    print("✅ XLA JIT compilation enabled")

    # 4. OPTIMAL ENVIRONMENT VARIABLES
    # Note: most TF_* variables are read when TensorFlow initializes, so
    # to take full effect they should be exported before the process
    # imports tensorflow; setting them here documents the intended values.
    optimal_env = {
        'TF_GPU_MEMORY_GROWTH': '1',
        'TF_ENABLE_GPU_GARBAGE_COLLECTION': '1',
        'TF_GPU_THREAD_MODE': 'gpu_private',
        'TF_ENABLE_ONEDNN_OPTS': '1',
        'CUDA_VISIBLE_DEVICES': '0',
        'TF_XLA_FLAGS': '--tf_xla_enable_xla_devices',
        'TF_ENABLE_AUTO_MIXED_PRECISION': '1'
    }

    print("\n🌍 OPTIMAL ENVIRONMENT VARIABLES:")
    for key, value in optimal_env.items():
        os.environ[key] = value
        print(f"   {key}={value}")

    # 5. GPU PERFORMANCE TEST
    print("\n🧪 GPU PERFORMANCE TEST:")
    try:
        with tf.device('/GPU:0'):
            # Warm-up so kernel initialization is not measured
            tf.matmul(tf.random.normal([10, 10]), tf.random.normal([10, 10])).numpy()

            a = tf.random.normal([1000, 1000])
            b = tf.random.normal([1000, 1000])
            start_time = tf.timestamp()
            c = tf.matmul(a, b)
            _ = c.numpy()  # force execution: GPU ops are dispatched asynchronously
            end_time = tf.timestamp()

        gpu_time = (end_time - start_time).numpy()
        print(f"   Matrix 1000x1000 multiply: {gpu_time:.4f}s")

        # CPU comparison
        with tf.device('/CPU:0'):
            a = tf.random.normal([1000, 1000])
            b = tf.random.normal([1000, 1000])
            start_time = tf.timestamp()
            c = tf.matmul(a, b)
            _ = c.numpy()
            end_time = tf.timestamp()

        cpu_time = (end_time - start_time).numpy()
        print(f"   CPU comparison: {cpu_time:.4f}s")
        print(f"   GPU speedup: {cpu_time / gpu_time:.1f}x")
    except Exception as e:
        print(f"❌ Performance test error: {e}")

    # 6. DETAILED GPU INFO
    print("\n📋 GPU DETAILS:")
    for i, gpu in enumerate(gpus):
        try:
            details = tf.config.experimental.get_device_details(gpu)
            print(f"   GPU {i}:")
            for key, value in details.items():
                print(f"      {key}: {value}")
        except Exception:
            print(f"   GPU {i}: details not available")

    # 7. SAVE CONFIGURATION
    config_file = Path('gpu_config_v04.json')
    config = {
        'gpu_count': len(gpus),
        'gpu_names': [str(gpu) for gpu in gpus],
        'tensorflow_version': tf.__version__,
        'cuda_built': tf.test.is_built_with_cuda(),
        'environment': optimal_env,
        'mixed_precision': True,
        'xla_enabled': True,
        'memory_growth': True
    }

    with open(config_file, 'w') as f:
        json.dump(config, f, indent=2)

    print(f"\n💾 Configuration saved to: {config_file}")
    return True
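
# --- Optional helper (sketch) -------------------------------------------
# A minimal sketch of how a downstream v04 component might reload the JSON
# written by configure_gpu_v04() and re-export the recorded environment
# variables before it imports TensorFlow. The helper name and behavior are
# illustrative assumptions, not part of the original v04 pipeline.
def load_gpu_config_v04(config_file='gpu_config_v04.json'):
    """Reload a saved GPU configuration and re-apply its environment."""
    path = Path(config_file)
    if not path.exists():
        return None
    with open(path) as f:
        config = json.load(f)
    # Re-export the recorded TF_*/CUDA_* variables; to take full effect
    # these must be set before TensorFlow is first imported.
    for key, value in config.get('environment', {}).items():
        os.environ[key] = value
    return config
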
def test_ddos_v04_gpu():
    """GPU test specific to the DDoS Detection v04 models."""
    print("\n🚀 DDoS DETECTION v04 MODEL TEST ON GPU")
    print("=" * 50)

    # Simulate v04 autoencoder inference
    print("🤖 Testing v04 autoencoder...")
    try:
        with tf.device('/GPU:0'):
            # Simulated feature input (176 v04 features)
            input_dim = 176

            # v04 autoencoder architecture
            model = tf.keras.Sequential([
                tf.keras.layers.Input(shape=(input_dim,)),
                tf.keras.layers.Dense(128, activation='relu'),
                tf.keras.layers.Dropout(0.2),
                tf.keras.layers.Dense(64, activation='relu'),
                tf.keras.layers.Dropout(0.2),
                tf.keras.layers.Dense(32, activation='relu'),
                tf.keras.layers.Dense(64, activation='relu'),
                tf.keras.layers.Dropout(0.2),
                tf.keras.layers.Dense(128, activation='relu'),
                tf.keras.layers.Dropout(0.2),
                # Keep the output layer in float32 for numerical stability
                # under the mixed_float16 global policy
                tf.keras.layers.Dense(input_dim, activation='linear', dtype='float32')
            ])

            model.compile(optimizer='adam', loss='mse')

            # Test with a realistic batch
            batch_size = 1000
            X_test = tf.random.normal([batch_size, input_dim])

            start_time = tf.timestamp()
            predictions = model(X_test, training=False)
            _ = predictions.numpy()  # force execution before stopping the clock
            end_time = tf.timestamp()

            inference_time = (end_time - start_time).numpy()
            throughput = batch_size / inference_time

            print(f"   ✅ Batch size: {batch_size}")
            print(f"   ✅ Inference time: {inference_time:.4f}s")
            print(f"   ✅ Throughput: {throughput:.0f} samples/sec")
            print(f"   ✅ Memory usage: {tf.config.experimental.get_memory_info('GPU:0')}")
    except Exception as e:
        print(f"   ❌ Autoencoder test error: {e}")

    print("\n🎯 PRODUCTION RECOMMENDATIONS:")
    print("   • Optimal batch size: 1000-2000 samples")
    print("   • Expected throughput: 10k+ samples/sec")
    print("   • Memory usage: ~4GB for a full training run")
    print("   • Parallel ensemble: 5-6 simultaneous models (see sketch below)")


def main():
    """Entry point: configure the GPU, then run the v04 tests."""
    gpu_available = configure_gpu_v04()

    if gpu_available:
        # DDoS v04 specific test
        test_ddos_v04_gpu()

        print("\n🎉 CONFIGURATION COMPLETE!")
        print("=" * 50)
        print("✅ GPU configured for the DDoS Detection v04 system")
        print("✅ Tesla M60 optimizations applied")
        print("✅ Mixed precision enabled")
        print("✅ Throughput optimized")

        print("\n🚀 PRODUCTION COMMANDS:")
        print("   python3 analisys_04.py --max-records 1000000 --memory-optimize")
        print("   python3 detect_multi_04.py --advanced --batch-size 2000")
    else:
        print("\n⚠️ GPU not available - CPU mode")
        print("   The v04 system will still run, but with reduced performance")


if __name__ == "__main__":
    main()
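

# --- Optional sketch: parallel ensemble inference ------------------------
# A minimal sketch of the "5-6 simultaneous models" recommendation above,
# assuming the ensemble members are independent Keras models sharing one
# GPU. The models and function name here are illustrative assumptions; the
# real v04 ensemble is not defined in this script. Wrapping each member's
# forward pass in a tf.function lets TensorFlow dispatch the kernels onto
# the GPU stream without serializing on Python-level overhead.
def demo_parallel_ensemble(n_models=5, input_dim=176, batch_size=1000):
    """Run several small illustrative models over the same batch."""
    models = [
        tf.keras.Sequential([
            tf.keras.layers.Input(shape=(input_dim,)),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(1, activation='sigmoid', dtype='float32')
        ])
        for _ in range(n_models)
    ]
    infer_fns = [tf.function(m) for m in models]

    X = tf.random.normal([batch_size, input_dim])
    # Average the members' scores, as a typical ensemble would
    scores = [fn(X, training=False) for fn in infer_fns]
    return tf.reduce_mean(tf.stack(scores, axis=0), axis=0)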