oKen38461 committed
Commit 4b3b80d · 1 Parent(s): c9f7ce3

Fix remaining CUDA device capability errors in wgp.py and sage2_core.py

Files changed (2):
  1. wan/modules/sage2_core.py  +21 -11
  2. wgp.py  +18 -5
wan/modules/sage2_core.py CHANGED
@@ -54,19 +54,29 @@ import warnings
 import os
 
 def is_sage2_supported():
-    device_count = torch.cuda.device_count()
-    for i in range(device_count):
-        major, minor = torch.cuda.get_device_capability(i)
-        if major < 8:
-            return False
-    return True
+    if not torch.cuda.is_available():
+        return False
+    try:
+        device_count = torch.cuda.device_count()
+        for i in range(device_count):
+            major, minor = torch.cuda.get_device_capability(i)
+            if major < 8:
+                return False
+        return True
+    except (RuntimeError, AssertionError):
+        return False
 
 def get_cuda_arch_versions():
-    cuda_archs = []
-    for i in range(torch.cuda.device_count()):
-        major, minor = torch.cuda.get_device_capability(i)
-        cuda_archs.append(f"sm{major}{minor}")
-    return cuda_archs
+    if not torch.cuda.is_available():
+        return ["sm75"]  # Fallback architecture
+    try:
+        cuda_archs = []
+        for i in range(torch.cuda.device_count()):
+            major, minor = torch.cuda.get_device_capability(i)
+            cuda_archs.append(f"sm{major}{minor}")
+        return cuda_archs
+    except (RuntimeError, AssertionError):
+        return ["sm75"]  # Fallback architecture
 
 def sageattn(
     qkv_list,
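
The availability-then-try guard used above can be exercised in isolation. A minimal sketch of the pattern (the `cuda_capabilities` helper name is illustrative, not part of the commit), showing how the patched probes behave on a CPU-only machine versus a GPU machine:

```python
import torch

def cuda_capabilities():
    # Probe (major, minor) for every visible CUDA device without raising,
    # mirroring the guard pattern the commit applies in sage2_core.py.
    if not torch.cuda.is_available():  # no driver or no device
        return []
    try:
        return [torch.cuda.get_device_capability(i)
                for i in range(torch.cuda.device_count())]
    except (RuntimeError, AssertionError):  # driver present but unusable
        return []

print(cuda_capabilities())  # [] on CPU-only; e.g. [(8, 9)] on an RTX 4090
```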
wgp.py CHANGED
@@ -1496,12 +1496,25 @@ attention_modes_installed = get_attention_modes()
 attention_modes_supported = get_supported_attention_modes()
 args = _parse_args()
 
-major, minor = torch.cuda.get_device_capability(args.gpu if len(args.gpu) > 0 else None)
-if major < 8:
-    print("Switching to FP16 models when possible as GPU architecture doesn't support optimed BF16 Kernels")
-    bfloat16_supported = False
+# Check CUDA availability and device capability safely
+if torch.cuda.is_available():
+    try:
+        device_id = args.gpu[0] if len(args.gpu) > 0 else None
+        major, minor = torch.cuda.get_device_capability(device_id)
+        if major < 8:
+            print("Switching to FP16 models when possible as GPU architecture doesn't support optimized BF16 Kernels")
+            bfloat16_supported = False
+        else:
+            bfloat16_supported = True
+    except (RuntimeError, AssertionError, IndexError):
+        # Fallback if CUDA device is not accessible or invalid device ID
+        print("CUDA device not accessible or invalid device ID, using fallback settings")
+        major, minor = 0, 0
+        bfloat16_supported = False
 else:
-    bfloat16_supported = True
+    print("CUDA not available, using CPU mode")
+    major, minor = 0, 0
+    bfloat16_supported = False
 
 args.flow_reverse = True
 processing_device = args.gpu
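
For context, compute capability 8.0 (Ampere) is the first architecture with native BF16 tensor-core support, which is why both files gate on `major < 8`. A hedged sketch of the dtype choice the `bfloat16_supported` flag implies (the `transformer_dtype` name is illustrative; this diff does not show how wgp.py consumes the flag):

```python
import torch

# Sketch: replicate the commit's capability gate and pick a dtype from it.
if torch.cuda.is_available():
    try:
        major, _ = torch.cuda.get_device_capability()
        bfloat16_supported = major >= 8  # Ampere (sm_80) or newer
    except (RuntimeError, AssertionError):
        bfloat16_supported = False
else:
    bfloat16_supported = False

# Illustrative downstream use: prefer BF16 on Ampere+, else fall back to FP16.
transformer_dtype = torch.bfloat16 if bfloat16_supported else torch.float16
print(transformer_dtype)
```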