import os
import torch
from functools import cache, wraps

# pylint: disable=protected-access, missing-function-docstring, line-too-long

# Arc GPUs can't allocate more than 4GB to a single memory block, so we slice the attention computation into smaller chunks

# Both values below are rough memory budgets in GB and can be overridden via environment variables
sdpa_slice_trigger_rate = float(os.environ.get('IPEX_SDPA_SLICE_TRIGGER_RATE', 1))
attention_slice_rate = float(os.environ.get('IPEX_ATTENTION_SLICE_RATE', 0.5))

# Find the largest split size that divides original_size evenly and keeps each slice under the slice_rate budget
@cache
def find_split_size(original_size, slice_block_size, slice_rate=2):
    split_size = original_size
    while split_size > 1:
        if (split_size * slice_block_size) <= slice_rate and original_size % split_size == 0:
            return split_size
        split_size -= 1
    return 1
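
# Illustrative example (not from the original source): with an 8-element dimension and an
# estimated 0.1 GB per unit, a 0.5 GB budget yields the largest even divisor that fits:
#   find_split_size(8, 0.1, slice_rate=0.5)  # -> 4  (4 * 0.1 GB = 0.4 GB <= 0.5 GB, and 8 % 4 == 0)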


# Find slice sizes for SDPA
@cache
def find_sdpa_slice_sizes(query_shape, key_shape, query_element_size, slice_rate=2, trigger_rate=3):
    batch_size, attn_heads, query_len, _ = query_shape
    _, _, key_len, _ = key_shape

    slice_batch_size = attn_heads * (query_len * key_len) * query_element_size / 1024 / 1024 / 1024  # estimated attention score size of one batch element, in GB

    split_batch_size = batch_size
    split_head_size = attn_heads
    split_query_size = query_len

    do_batch_split = False
    do_head_split = False
    do_query_split = False

    if batch_size * slice_batch_size >= trigger_rate:
        do_batch_split = True
        split_batch_size = find_split_size(batch_size, slice_batch_size, slice_rate=slice_rate)

        if split_batch_size * slice_batch_size > slice_rate:
            slice_head_size = split_batch_size * (query_len * key_len) * query_element_size / 1024 / 1024 / 1024  # estimated score size per attention head after the batch split, in GB
            do_head_split = True
            split_head_size = find_split_size(attn_heads, slice_head_size, slice_rate=slice_rate)

            if split_head_size * slice_head_size > slice_rate:
                slice_query_size = split_batch_size * split_head_size * key_len * query_element_size / 1024 / 1024 / 1024  # estimated score size per query position after the batch and head splits, in GB
                do_query_split = True
                split_query_size = find_split_size(query_len, slice_query_size, slice_rate=slice_rate)

    return do_batch_split, do_head_split, do_query_split, split_batch_size, split_head_size, split_query_size
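
# Illustrative example (not from the original source): a fp16 query/key of shape
# (2, 8, 8192, 64) has a ~1 GB score matrix per batch element, so with slice_rate=0.5
# and trigger_rate=1 the batch is split down to 1 and the heads are split down to 4:
#   find_sdpa_slice_sizes((2, 8, 8192, 64), (2, 8, 8192, 64), 2, slice_rate=0.5, trigger_rate=1)
#   # -> (True, True, False, 1, 4, 8192)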


original_scaled_dot_product_attention = torch.nn.functional.scaled_dot_product_attention
@wraps(torch.nn.functional.scaled_dot_product_attention)
def dynamic_scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, **kwargs):
    if query.device.type != "xpu":
        return original_scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, **kwargs)
    is_unsqueezed = False
    if len(query.shape) == 3:
        query = query.unsqueeze(0)
        is_unsqueezed = True
    if len(key.shape) == 3:
        key = key.unsqueeze(0)
    if len(value.shape) == 3:
        value = value.unsqueeze(0)
    do_batch_split, do_head_split, do_query_split, split_batch_size, split_head_size, split_query_size = find_sdpa_slice_sizes(query.shape, key.shape, query.element_size(), slice_rate=attention_slice_rate, trigger_rate=sdpa_slice_trigger_rate)

    # Slice SDPA
    if do_batch_split:
        batch_size, attn_heads, query_len, _ = query.shape
        _, _, _, head_dim = value.shape
        hidden_states = torch.zeros((batch_size, attn_heads, query_len, head_dim), device=query.device, dtype=query.dtype)
        if attn_mask is not None:
            attn_mask = attn_mask.expand((query.shape[0], query.shape[1], query.shape[2], key.shape[-2]))
        for ib in range(batch_size // split_batch_size):
            start_idx = ib * split_batch_size
            end_idx = (ib + 1) * split_batch_size
            if do_head_split:
                for ih in range(attn_heads // split_head_size): # pylint: disable=invalid-name
                    start_idx_h = ih * split_head_size
                    end_idx_h = (ih + 1) * split_head_size
                    if do_query_split:
                        for iq in range(query_len // split_query_size): # pylint: disable=invalid-name
                            start_idx_q = iq * split_query_size
                            end_idx_q = (iq + 1) * split_query_size
                            hidden_states[start_idx:end_idx, start_idx_h:end_idx_h, start_idx_q:end_idx_q, :] = original_scaled_dot_product_attention(
                                query[start_idx:end_idx, start_idx_h:end_idx_h, start_idx_q:end_idx_q, :],
                                key[start_idx:end_idx, start_idx_h:end_idx_h, :, :],
                                value[start_idx:end_idx, start_idx_h:end_idx_h, :, :],
                                attn_mask=attn_mask[start_idx:end_idx, start_idx_h:end_idx_h, start_idx_q:end_idx_q, :] if attn_mask is not None else attn_mask,
                                dropout_p=dropout_p, is_causal=is_causal, **kwargs
                            )
                    else:
                        hidden_states[start_idx:end_idx, start_idx_h:end_idx_h, :, :] = original_scaled_dot_product_attention(
                            query[start_idx:end_idx, start_idx_h:end_idx_h, :, :],
                            key[start_idx:end_idx, start_idx_h:end_idx_h, :, :],
                            value[start_idx:end_idx, start_idx_h:end_idx_h, :, :],
                            attn_mask=attn_mask[start_idx:end_idx, start_idx_h:end_idx_h, :, :] if attn_mask is not None else attn_mask,
                            dropout_p=dropout_p, is_causal=is_causal, **kwargs
                        )
            else:
                hidden_states[start_idx:end_idx, :, :, :] = original_scaled_dot_product_attention(
                    query[start_idx:end_idx, :, :, :],
                    key[start_idx:end_idx, :, :, :],
                    value[start_idx:end_idx, :, :, :],
                    attn_mask=attn_mask[start_idx:end_idx, :, :, :] if attn_mask is not None else attn_mask,
                    dropout_p=dropout_p, is_causal=is_causal, **kwargs
                )
        torch.xpu.synchronize(query.device)
    else:
        hidden_states = original_scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, **kwargs)
    if is_unsqueezed:
        hidden_states = hidden_states.squeeze(0)
    return hidden_states
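

# Minimal usage sketch (assumption, not part of the original module): the wrapper is meant
# to replace torch's SDPA globally so that existing model code transparently picks up the
# sliced implementation on XPU devices. Kept commented out so importing this file does not
# change global behaviour:
#
#   import torch
#   torch.nn.functional.scaled_dot_product_attention = dynamic_scaled_dot_product_attention
#   out = torch.nn.functional.scaled_dot_product_attention(query, key, value, is_causal=True)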