#!/usr/bin/env python3
"""Quick test to verify the Tesla M60 fixes in analisys_04.py."""

import os
import sys


def test_tesla_m60_config():
    """Check the Tesla M60 (compute capability 5.2) TensorFlow setup.

    Configures the environment, imports TensorFlow, and exercises GPU
    detection, memory growth, mixed precision, and a tiny GPU matmul.

    Returns:
        True when a GPU is detected and the configuration steps run;
        False when no GPU is found or the import/configuration fails.
    """
    print("๐Ÿงช TEST CONFIGURAZIONE TESLA M60")
    print("=" * 50)

    try:
        # Environment variables must be set BEFORE importing TensorFlow
        # for them to take effect.
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
        # โšก CRITICAL: Tesla M60 CC 5.2 requires the legacy GPU allocator. โšก
        os.environ['TF_GPU_ALLOCATOR'] = 'legacy'
        print("๐Ÿ”ง TF_GPU_ALLOCATOR=legacy configurato per Tesla M60 CC 5.2")

        import tensorflow as tf
        print(f"โœ… TensorFlow {tf.__version__} importato")

        # GPU detection
        gpus = tf.config.list_physical_devices('GPU')
        print(f"โœ… GPU rilevate: {len(gpus)}")

        if gpus:
            gpu = gpus[0]
            print(f" GPU: {gpu}")

            # Memory growth: a ValueError mentioning "virtual devices"
            # means the device was already configured elsewhere and is
            # harmless here; any other ValueError is reported.
            try:
                tf.config.experimental.set_memory_growth(gpu, True)
                print("โœ… Memory growth configurato")
            except ValueError as e:
                if "virtual devices" in str(e):
                    print("โ„น๏ธ Virtual devices giร  configurati")
                else:
                    print(f"โš ๏ธ Memory config error: {e}")

            # Mixed precision: CC 5.2 emits a performance warning, which
            # is expected; on failure fall back to plain float32.
            try:
                policy = tf.keras.mixed_precision.Policy('mixed_float16')
                tf.keras.mixed_precision.set_global_policy(policy)
                print("โš ๏ธ Mixed precision abilitato (warning CC 5.2 atteso)")
            except Exception as e:
                print(f"โŒ Mixed precision fallito: {e}")
                policy = tf.keras.mixed_precision.Policy('float32')
                tf.keras.mixed_precision.set_global_policy(policy)
                print("โœ… Fallback FP32 configurato")

            # Minimal GPU smoke test: 2x2 matmul against the identity.
            # NOTE(review): a matmul failure is reported but does NOT fail
            # the test — preserved from the original, presumably a
            # deliberate best-effort check; confirm this is intended.
            try:
                with tf.device('/GPU:0'):
                    a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
                    b = tf.constant([[1.0, 0.0], [0.0, 1.0]])
                    c = tf.matmul(a, b)
                    result = c.numpy()
                    print(f"โœ… Test operazione GPU: {result.shape}")
            except Exception as e:
                print(f"โŒ Test GPU fallito: {e}")

            return True
        else:
            print("โŒ Nessuna GPU rilevata")
            return False

    except Exception as e:
        print(f"โŒ Test fallito: {e}")
        return False


def test_batch_sizes():
    """Simulate the dynamic batch-size calculation used by analisys_04.py.

    Uses a conservative 7.0 GB memory budget for the Tesla M60 and a
    fixed feature count of 280 float32 values per sample.

    Returns:
        True always (the function only prints the computed sizes).
    """
    print("\n๐Ÿงช TEST BATCH SIZES DINAMICI")
    print("=" * 50)

    # 280 float32 features -> bytes -> MB per sample.
    available_memory_gb = 7.0  # conservative Tesla M60 budget
    feature_count = 280
    memory_per_sample_mb = (feature_count * 4) / 1024 / 1024
    # Keep only 30% of the theoretical capacity as headroom.
    max_samples = int((available_memory_gb * 1024) / memory_per_sample_mb * 0.3)

    batch_sizes = {
        'feature_extraction': min(max_samples * 2, 8000),
        'model_training': min(max_samples, 2048),
        'prediction': min(max_samples * 3, 10000),
        'autoencoder': min(max_samples // 2, 1024),
        'lstm_sequence': min(max_samples, 4096),
    }

    print(f"โœ… Feature count: {feature_count}")
    print(f"โœ… Memory per sample: {memory_per_sample_mb:.3f} MB")
    print(f"โœ… Max samples in memory: {max_samples:,}")
    print("โœ… Batch sizes calcolati:")
    for name, size in batch_sizes.items():
        print(f" {name}: {size:,}")

    return True


def main():
    """Run all Tesla M60 verification tests.

    Returns:
        True when every test passed, False otherwise.
    """
    print("๐Ÿš€ TEST CORREZIONI TESLA M60 per analisys_04.py")
    print("=" * 60)

    success = True

    # GPU / TensorFlow configuration check.
    if not test_tesla_m60_config():
        success = False

    # Batch-size arithmetic check.
    if not test_batch_sizes():
        success = False

    print("\n" + "=" * 60)
    if success:
        print("๐ŸŽ‰ TUTTI I TEST SUPERATI!")
        print("โœ… analisys_04.py dovrebbe funzionare correttamente")
        print("\n๐Ÿ’ก Comando suggerito:")
        print(" python analisys_04.py --max-records 80000 --demo")
    else:
        print("โŒ ALCUNI TEST FALLITI")
        print("โš ๏ธ Verificare configurazione Tesla M60")

    return success


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)