# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
import warnings
import torch
from typing import Optional, Tuple
try:
    import flash_attn_interface
    FLASH_ATTN_3_AVAILABLE = True
except ModuleNotFoundError:
    FLASH_ATTN_3_AVAILABLE = False

try:
    import flash_attn
    FLASH_ATTN_2_AVAILABLE = True
except ModuleNotFoundError:
    FLASH_ATTN_2_AVAILABLE = False

__all__ = [
    'flash_attention',
    'attention',
]
# ---------------------------
# Custom op + fake kernel
# ---------------------------
from typing import Sequence
@torch.library.custom_op("wan::flash_attention", mutates_args=())
def _wan_flash_attention_op(
    q: torch.Tensor,
    k: torch.Tensor,
    v: torch.Tensor,
    q_lens: Optional[torch.Tensor] = None,
    k_lens: Optional[torch.Tensor] = None,
    dropout_p: float = 0.0,
    softmax_scale: Optional[float] = None,
    q_scale: Optional[float] = None,
    causal: bool = False,
    # IMPORTANT: schema-friendly default (None), not a tuple
    window_size: Optional[Sequence[int]] = None,
    deterministic: bool = False,
    dtype: torch.dtype = torch.bfloat16,
    version: Optional[int] = None,
) -> torch.Tensor:
    half_dtypes = (torch.float16, torch.bfloat16)
    assert dtype in half_dtypes
    assert q.size(-1) <= 256

    # normalize window_size to a 2-tuple for FA2 API
    if window_size is None:
        ws = (-1, -1)
    else:
        ws = tuple(window_size)
        if len(ws) != 2:
            raise ValueError(f"window_size must have length 2; got {window_size!r}")

    b, lq, nheads = q.shape[0], q.shape[1], q.shape[2]
    lk = k.shape[1]
    out_dtype = q.dtype

    def half(x: torch.Tensor) -> torch.Tensor:
        return x if x.dtype in half_dtypes else x.to(dtype)
    # --- preprocess: flatten the batch and apply per-sample sequence lengths ---
    if q_lens is None:
        q_flat = half(q.flatten(0, 1))
        q_lens = torch.tensor([lq] * b, dtype=torch.int32)
    else:
        q_flat = half(torch.cat([u[:v] for u, v in zip(q, q_lens)]))

    if k_lens is None:
        k_flat = half(k.flatten(0, 1))
        v_flat = half(v.flatten(0, 1))
        k_lens = torch.tensor([lk] * b, dtype=torch.int32)
    else:
        k_flat = half(torch.cat([u[:v] for u, v in zip(k, k_lens)]))
        v_flat = half(torch.cat([u[:v] for u, v in zip(v, k_lens)]))

    q_flat = q_flat.to(v_flat.dtype)
    k_flat = k_flat.to(v_flat.dtype)

    if q_scale is not None:
        q_flat = q_flat * q_scale
    if version is not None and version == 3 and not FLASH_ATTN_3_AVAILABLE:
        warnings.warn(
            'Flash attention 3 is not available; falling back to flash attention 2.')

    if FLASH_ATTN_3_AVAILABLE:
        # Note: this FA3 varlen call does not pass dropout_p or window_size;
        # those arguments only take effect on the FA2 and SDPA paths below.
        ret = flash_attn_interface.flash_attn_varlen_func(
            q=q_flat,
            k=k_flat,
            v=v_flat,
            cu_seqlens_q=torch.cat([q_lens.new_zeros([1]), q_lens]).cumsum(
                0, dtype=torch.int32).to(q_flat.device, non_blocking=True),
            cu_seqlens_k=torch.cat([k_lens.new_zeros([1]), k_lens]).cumsum(
                0, dtype=torch.int32).to(k_flat.device, non_blocking=True),
            seqused_q=None,
            seqused_k=None,
            max_seqlen_q=lq,
            max_seqlen_k=lk,
            softmax_scale=softmax_scale,
            causal=causal,
            deterministic=deterministic,
        )
        # FA3 may return (out, lse); keep only the attention output.
        out0 = ret[0] if isinstance(ret, (tuple, list)) else ret
        total_q = b * lq
        if out0.dim() != 3:
            raise RuntimeError(
                f"Unexpected FA3 output rank {out0.dim()} shape={tuple(out0.shape)}")
        # Accept either a (total_q, nheads, dh) or (nheads, total_q, dh) layout.
        if out0.shape[0] == total_q:
            out_flat = out0
        elif out0.shape[0] == nheads and out0.shape[1] == total_q:
            out_flat = out0.transpose(0, 1).contiguous()
        else:
            raise RuntimeError(f"Unexpected FA3 output shape {tuple(out0.shape)}")
        out = out_flat.unflatten(0, (b, lq))
    elif FLASH_ATTN_2_AVAILABLE:
        out = flash_attn.flash_attn_varlen_func(
            q=q_flat,
            k=k_flat,
            v=v_flat,
            cu_seqlens_q=torch.cat([q_lens.new_zeros([1]), q_lens]).cumsum(
                0, dtype=torch.int32).to(q_flat.device, non_blocking=True),
            cu_seqlens_k=torch.cat([k_lens.new_zeros([1]), k_lens]).cumsum(
                0, dtype=torch.int32).to(q_flat.device, non_blocking=True),
            max_seqlen_q=lq,
            max_seqlen_k=lk,
            dropout_p=dropout_p,
            softmax_scale=softmax_scale,
            causal=causal,
            window_size=ws,  # normalized 2-tuple
            deterministic=deterministic,
        ).unflatten(0, (b, lq))
    else:
        # Neither FA3 nor FA2 is available: fall back to PyTorch SDPA.
        # Padding masks derived from q_lens/k_lens are not applied on this path.
        q_s = q.transpose(1, 2).to(dtype)
        k_s = k.transpose(1, 2).to(dtype)
        v_s = v.transpose(1, 2).to(dtype)
        out = torch.nn.functional.scaled_dot_product_attention(
            q_s, k_s, v_s, attn_mask=None, is_causal=causal, dropout_p=dropout_p
        ).transpose(1, 2).contiguous()

    return out.to(out_dtype)

@_wan_flash_attention_op.register_fake
def _wan_flash_attention_op_fake(
    q,
    k,
    v,
    q_lens=None,
    k_lens=None,
    dropout_p: float = 0.0,
    softmax_scale=None,
    q_scale=None,
    causal: bool = False,
    window_size: Optional[Sequence[int]] = None,
    deterministic: bool = False,
    dtype: torch.dtype = torch.bfloat16,
    version: Optional[int] = None,
):
    # Match output shape: (B, Lq, Nq, Dh_v) and keep the SAME fake device as `q`
    B, Lq, Nq, _ = q.shape
    Dh_v = v.shape[-1]
    return q.new_empty((B, Lq, Nq, Dh_v), dtype=q.dtype)
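
# The fake (meta) kernel above is what lets torch.compile / AOTAutograd trace
# `wan::flash_attention` as a single opaque node without running the real kernel;
# it only propagates shape, dtype, and device. Illustrative usage only (a sketch,
# not part of the original file; the lambda below is hypothetical):
#
#   compiled_attn = torch.compile(lambda q, k, v: flash_attention(q, k, v))
#   out = compiled_attn(q, k, v)  # traced as one wan::flash_attention node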

# ---------------------------
# Public API
# ---------------------------
def flash_attention(
    q,
    k,
    v,
    q_lens=None,
    k_lens=None,
    dropout_p=0.,
    softmax_scale=None,
    q_scale=None,
    causal=False,
    window_size=(-1, -1),
    deterministic=False,
    dtype=torch.bfloat16,
    version=None,
):
"""
q: [B, Lq, Nq, C1].
k: [B, Lk, Nk, C1].
v: [B, Lk, Nk, C2]. Nq must be divisible by Nk.
q_lens: [B].
k_lens: [B].
dropout_p: float. Dropout probability.
softmax_scale: float. The scaling of QK^T before applying softmax.
causal: bool. Whether to apply causal attention mask.
window_size: (left right). If not (-1, -1), apply sliding window local attention.
deterministic: bool. If True, slightly slower and uses more memory.
dtype: torch.dtype. Apply when dtype of q/k/v is not float16/bfloat16.
"""
    # Delegate to the custom op so Dynamo/AOT treats the whole call as a single
    # graph node; the eager kernel in _wan_flash_attention_op does the real work.
    return _wan_flash_attention_op(
        q, k, v,
        q_lens=q_lens,
        k_lens=k_lens,
        dropout_p=dropout_p,
        softmax_scale=softmax_scale,
        q_scale=q_scale,
        causal=causal,
        window_size=window_size,
        deterministic=deterministic,
        dtype=dtype,
        version=version,
    )
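
# Illustrative call with padded keys/values (a sketch, not from the original file;
# all sizes below are assumptions). Passing only k_lens is the typical pattern for
# padded context: queries stay full length while keys/values are truncated per
# batch item before the varlen kernel.
#
#   q = torch.randn(2, 16, 4, 64, device="cuda", dtype=torch.bfloat16)  # [B, Lq, Nq, C1]
#   k = torch.randn(2, 32, 4, 64, device="cuda", dtype=torch.bfloat16)  # [B, Lk, Nk, C1]
#   v = torch.randn(2, 32, 4, 64, device="cuda", dtype=torch.bfloat16)  # [B, Lk, Nk, C2]
#   k_lens = torch.tensor([32, 20], dtype=torch.int32)                  # valid tokens per item
#   out = flash_attention(q, k, v, k_lens=k_lens)                       # [B, Lq, Nq, C2]
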
def attention(
    q,
    k,
    v,
    q_lens=None,
    k_lens=None,
    dropout_p=0.,
    softmax_scale=None,
    q_scale=None,
    causal=False,
    window_size=(-1, -1),
    deterministic=False,
    dtype=torch.bfloat16,
    fa_version=None,
):
    if FLASH_ATTN_2_AVAILABLE or FLASH_ATTN_3_AVAILABLE:
        return flash_attention(
            q=q,
            k=k,
            v=v,
            q_lens=q_lens,
            k_lens=k_lens,
            dropout_p=dropout_p,
            softmax_scale=softmax_scale,
            q_scale=q_scale,
            causal=causal,
            window_size=window_size,
            deterministic=deterministic,
            dtype=dtype,
            version=fa_version,
        )
    else:
        if q_lens is not None or k_lens is not None:
            warnings.warn(
                'Padding mask is disabled when using scaled_dot_product_attention. It can have a significant impact on performance.'
            )
        q_ = q.transpose(1, 2).to(dtype)
        k_ = k.transpose(1, 2).to(dtype)
        v_ = v.transpose(1, 2).to(dtype)
        out = torch.nn.functional.scaled_dot_product_attention(
            q_, k_, v_, attn_mask=None, is_causal=causal, dropout_p=dropout_p
        )
        return out.transpose(1, 2).contiguous()
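
# Minimal smoke test (a sketch added for illustration, not part of the original
# module). Tensor sizes are arbitrary assumptions; the flash-attn paths require
# CUDA half-precision tensors, so on a CPU-only machine this only runs through
# the SDPA fallback (i.e. when neither flash-attn package is importable).
if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    b, lq, lk, n, d = 2, 16, 32, 4, 64  # assumed toy sizes
    q = torch.randn(b, lq, n, d, device=device, dtype=torch.bfloat16)
    k = torch.randn(b, lk, n, d, device=device, dtype=torch.bfloat16)
    v = torch.randn(b, lk, n, d, device=device, dtype=torch.bfloat16)
    out = attention(q, k, v)
    print("attention output:", tuple(out.shape), out.dtype)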