# Replit checkpoint metadata (extraction residue, kept for traceability):
#   session 7a657272-55ba-4a79-9a2e-f1ed9bc7a528,
#   event 1c71ce6e-1a3e-4f53-bb5d-77cdd22b8ea3, full_checkpoint.
#!/usr/bin/env python3
"""
Quick test to verify the Tesla M60 fixes in analisys_04.py.
"""

import os
import sys
def test_tesla_m60_config():
    """Verify the Tesla M60 (compute capability 5.2) TensorFlow setup.

    Sets the relevant environment variables *before* importing TensorFlow
    (the order is critical: TF reads them once at import time), then
    checks GPU detection, memory-growth configuration, the mixed-precision
    policy (with an FP32 fallback), and a trivial matmul on the GPU.

    Returns:
        bool: True when a GPU was detected and the checks ran,
        False when no GPU was found or the configuration raised.
    """
    print("🧪 TEST CONFIGURAZIONE TESLA M60")
    print("=" * 50)

    try:
        # Must happen BEFORE `import tensorflow`: TF snapshots these at import.
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
        # CRITICAL: the legacy GPU allocator is required on CC 5.2 hardware.
        os.environ['TF_GPU_ALLOCATOR'] = 'legacy'  # required for CC 5.2
        print("🔧 TF_GPU_ALLOCATOR=legacy configurato per Tesla M60 CC 5.2")

        import tensorflow as tf
        print(f"✅ TensorFlow {tf.__version__} importato")

        # GPU detection.
        gpus = tf.config.list_physical_devices('GPU')
        print(f"✅ GPU rilevate: {len(gpus)}")

        if gpus:
            gpu = gpus[0]
            print(f" GPU: {gpu}")

            # Memory growth — safe to attempt; raises ValueError when virtual
            # devices have already been configured elsewhere.
            try:
                tf.config.experimental.set_memory_growth(gpu, True)
                print("✅ Memory growth configurato")
                memory_config = "memory_growth"
            except ValueError as e:
                if "virtual devices" in str(e):
                    print("ℹ️ Virtual devices già configurati")
                    memory_config = "virtual_device"
                else:
                    print(f"⚠️ Memory config error: {e}")
                    memory_config = "error"
            # NOTE(review): memory_config is assigned but never read here —
            # presumably mirrors analisys_04.py; confirm before removing.

            # Mixed precision — a warning is expected on CC 5.2.
            try:
                policy = tf.keras.mixed_precision.Policy('mixed_float16')
                tf.keras.mixed_precision.set_global_policy(policy)
                print("⚠️ Mixed precision abilitato (warning CC 5.2 atteso)")
            except Exception as e:
                print(f"❌ Mixed precision fallito: {e}")
                # FP32 fallback when mixed_float16 is rejected.
                policy = tf.keras.mixed_precision.Policy('float32')
                tf.keras.mixed_precision.set_global_policy(policy)
                print("✅ Fallback FP32 configurato")

            # Simple GPU operation: 2x2 matmul against the identity.
            try:
                with tf.device('/GPU:0'):
                    a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
                    b = tf.constant([[1.0, 0.0], [0.0, 1.0]])
                    c = tf.matmul(a, b)
                    result = c.numpy()
                    print(f"✅ Test operazione GPU: {result.shape}")
            except Exception as e:
                print(f"❌ Test GPU fallito: {e}")

            return True
        else:
            print("❌ Nessuna GPU rilevata")
            return False

    except Exception as e:
        print(f"❌ Test fallito: {e}")
        return False
|
||
def test_batch_sizes():
    """Verify the dynamic batch-size calculation for the Tesla M60.

    Simulates the sizing logic: derives the per-sample memory footprint
    from the feature count, caps the in-memory sample count at 30% of the
    usable GPU memory, and prints the per-stage batch sizes.

    Returns:
        bool: always True (the function is a printed demonstration).
    """
    print("\n🧪 TEST BATCH SIZES DINAMICI")
    print("=" * 50)

    # Simulated sizing: conservative usable memory on the Tesla M60.
    mem_gb = 7.0
    n_features = 280
    # 4 bytes per float32 feature, converted bytes -> MB.
    sample_mb = (n_features * 4) / 1024 / 1024
    # 30% safety factor on how many samples fit in memory at once.
    cap_samples = int((mem_gb * 1024) / sample_mb * 0.3)

    # (stage, raw size, hard cap) — insertion order matches the report below.
    stages = [
        ('feature_extraction', cap_samples * 2, 8000),
        ('model_training', cap_samples, 2048),
        ('prediction', cap_samples * 3, 10000),
        ('autoencoder', cap_samples // 2, 1024),
        ('lstm_sequence', cap_samples, 4096),
    ]
    batch_sizes = {stage: min(raw, cap) for stage, raw, cap in stages}

    print(f"✅ Feature count: {n_features}")
    print(f"✅ Memory per sample: {sample_mb:.3f} MB")
    print(f"✅ Max samples in memory: {cap_samples:,}")
    print("✅ Batch sizes calcolati:")
    for stage, size in batch_sizes.items():
        print(f" {stage}: {size:,}")

    return True
|
||
def main():
    """Run every Tesla M60 check and print an overall pass/fail summary.

    Returns:
        bool: True when all checks passed, False otherwise.
    """
    print("🚀 TEST CORREZIONI TESLA M60 per analisys_04.py")
    print("=" * 60)

    # Run both checks unconditionally so every diagnostic is printed,
    # then combine the outcomes.
    outcomes = [test_tesla_m60_config(), test_batch_sizes()]
    success = all(outcomes)

    print("\n" + "=" * 60)
    if success:
        print("🎉 TUTTI I TEST SUPERATI!")
        print("✅ analisys_04.py dovrebbe funzionare correttamente")
        print("\n💡 Comando suggerito:")
        print(" python analisys_04.py --max-records 80000 --demo")
    else:
        print("❌ ALCUNI TEST FALLITI")
        print("⚠️ Verificare configurazione Tesla M60")

    return success
||
if __name__ == "__main__":
    # Exit code 0 on success, 1 on any failed check (CI-friendly).
    sys.exit(0 if main() else 1)