Martijn van Beers committed
Commit: 9d1fa85
Parent(s): 6c01ee5

Clean up code

* separates out the code for the two methods
* use gradio Blocks instead of Interface for flexibility
* add a markdown file for a note on explainability models and their
  limitations, filled with a placeholder for now
- app.py +33 -271
- description.md +2 -2
- lib/gradient_rollout.py +112 -0
- lib/integrated_gradients.py +90 -0
- lib/util.py +86 -0
- notice.md +1 -0
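
The main structural change is that the two explanation methods no longer get their own gradio.Interface; both are laid out and wired up inside a single gradio.Blocks context, so a slider change or the submit button can update either result panel independently. A minimal sketch of that pattern (the handler name, labels and return value here are illustrative, not the app's actual code):

    import gradio

    def explain(sentence, layer):                        # stand-in for an explainer callable
        return f"<p>explanation of {sentence!r} from layer {layer}</p>"

    with gradio.Blocks() as demo:
        sentence = gradio.Textbox(label="Input sentence")
        layer = gradio.Slider(minimum=0, maximum=12, value=8, step=1, label="Layer")
        result = gradio.HTML()
        button = gradio.Button("Submit")
        layer.change(explain, [sentence, layer], result)   # Blocks allows per-component events
        button.click(explain, [sentence, layer], result)

    demo.launch()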
app.py
CHANGED
@@ -1,291 +1,53 @@
 import sys
 import pandas
 import gradio
+import pathlib
 
 sys.path.append("lib")
 
 import torch
 
+from roberta2 import RobertaForSequenceClassification
+from gradient_rollout import GradientRolloutExplainer
+from integrated_gradients import IntegratedGradientsExplainer
 from transformers import AutoModelForSequenceClassification
-from BERT_explainability.ExplanationGenerator import Generator
-from BERT_explainability.roberta2 import RobertaForSequenceClassification
 from transformers import AutoTokenizer
 from captum.attr import LayerIntegratedGradients
 from captum.attr import visualization
+import util
 import torch
 
-
-class PyTMinMaxScalerVectorized(object):
-    """
-    Transforms each channel to the range [0, 1].
-    """
+ig_explainer = IntegratedGradientsExplainer()
+gr_explainer = GradientRolloutExplainer()
 
-    def __init__(self, dimension=-1):
-        self.d = dimension
-
-    def __call__(self, tensor):
-        d = self.d
-        scale = 1.0 / (
-            tensor.max(dim=d, keepdim=True)[0] - tensor.min(dim=d, keepdim=True)[0]
-        )
-        tensor.mul_(scale).sub_(tensor.min(dim=d, keepdim=True)[0])
-        return tensor
-
-
-if torch.cuda.is_available():
-    device = torch.device("cuda")
-else:
-    device = torch.device("cpu")
-
-model = RobertaForSequenceClassification.from_pretrained(
-    "textattack/roberta-base-SST-2"
-).to(device)
-model.eval()
-model2 = AutoModelForSequenceClassification.from_pretrained("textattack/roberta-base-SST-2")
-tokenizer = AutoTokenizer.from_pretrained("textattack/roberta-base-SST-2")
-# initialize the explanations generator
-explanations = Generator(model, "roberta")
-
-classifications = ["NEGATIVE", "POSITIVE"]
-
-# rule 5 from paper
-def avg_heads(cam, grad):
-    cam = (grad * cam).clamp(min=0).mean(dim=-3)
-    # set negative values to 0, then average
-    # cam = cam.clamp(min=0).mean(dim=0)
-    return cam
-
-
-# rule 6 from paper
-def apply_self_attention_rules(R_ss, cam_ss):
-    R_ss_addition = torch.matmul(cam_ss, R_ss)
-    return R_ss_addition
-
-
-def generate_relevance(model, input_ids, attention_mask, index=None, start_layer=0):
-    output = model(input_ids=input_ids, attention_mask=attention_mask)[0]
-    if index == None:
-        # index = np.expand_dims(np.arange(input_ids.shape[1])
-        # by default explain the class with the highest score
-        index = output.argmax(axis=-1).detach().cpu().numpy()
-
-    # create a one-hot vector selecting class we want explanations for
-    one_hot = (
-        torch.nn.functional.one_hot(
-            torch.tensor(index, dtype=torch.int64), num_classes=output.size(-1)
-        )
-        .to(torch.float)
-        .requires_grad_(True)
-    ).to(device)
-    one_hot = torch.sum(one_hot * output)
-    model.zero_grad()
-    # create the gradients for the class we're interested in
-    one_hot.backward(retain_graph=True)
-
-    num_tokens = model.roberta.encoder.layer[0].attention.self.get_attn().shape[-1]
-    R = torch.eye(num_tokens).expand(output.size(0), -1, -1).clone().to(device)
-
-    for i, blk in enumerate(model.roberta.encoder.layer):
-        if i < start_layer:
-            continue
-        grad = blk.attention.self.get_attn_gradients()
-        cam = blk.attention.self.get_attn()
-        cam = avg_heads(cam, grad)
-        joint = apply_self_attention_rules(R, cam)
-        R += joint
-    return output, R[:, 0, 1:-1]
-
-
-def visualize_text(datarecords, legend=True):
-    dom = ["<table width: 100%>"]
-    rows = [
-        "<tr><th>True Label</th>"
-        "<th>Predicted Label</th>"
-        "<th>Attribution Label</th>"
-        "<th>Attribution Score</th>"
-        "<th>Word Importance</th>"
-    ]
-    for datarecord in datarecords:
-        rows.append(
-            "".join(
-                [
-                    "<tr>",
-                    visualization.format_classname(datarecord.true_class),
-                    visualization.format_classname(
-                        "{0} ({1:.2f})".format(
-                            datarecord.pred_class, datarecord.pred_prob
-                        )
-                    ),
-                    visualization.format_classname(datarecord.attr_class),
-                    visualization.format_classname(
-                        "{0:.2f}".format(datarecord.attr_score)
-                    ),
-                    visualization.format_word_importances(
-                        datarecord.raw_input_ids, datarecord.word_attributions
-                    ),
-                    "<tr>",
-                ]
-            )
-        )
-
-    if legend:
-        dom.append(
-            '<div style="border-top: 1px solid; margin-top: 5px; \
-            padding-top: 5px; display: inline-block">'
-        )
-        dom.append("<b>Legend: </b>")
-
-        for value, label in zip([-1, 0, 1], ["Negative", "Neutral", "Positive"]):
-            dom.append(
-                '<span style="display: inline-block; width: 10px; height: 10px; \
-                border: 1px solid; background-color: \
-                {value}"></span> {label} '.format(
-                    value=visualization._get_color(value), label=label
-                )
-            )
-        dom.append("</div>")
-
-    dom.append("".join(rows))
-    dom.append("</table>")
-    html = "".join(dom)
-
-    return html
-
-
-def show_explanation(model, input_ids, attention_mask, index=None, start_layer=8):
-    # generate an explanation for the input
-    output, expl = generate_relevance(
-        model, input_ids, attention_mask, index=index, start_layer=start_layer
-    )
-    # normalize scores
-    scaler = PyTMinMaxScalerVectorized()
-
-    norm = scaler(expl)
-    # get the model classification
-    output = torch.nn.functional.softmax(output, dim=-1)
-
-    vis_data_records = []
-    for record in range(input_ids.size(0)):
-        classification = output[record].argmax(dim=-1).item()
-        class_name = classifications[classification]
-        nrm = norm[record]
-
-        # if the classification is negative, higher explanation scores are more negative
-        # flip for visualization
-        if class_name == "NEGATIVE":
-            nrm *= -1
-        tokens = tokenizer.convert_ids_to_tokens(input_ids[record].flatten())[
-            1 : 0 - ((attention_mask[record] == 0).sum().item() + 1)
-        ]
-        # vis_data_records.append(list(zip(tokens, nrm.tolist())))
-        vis_data_records.append(
-            visualization.VisualizationDataRecord(
-                nrm,
-                output[record][classification],
-                classification,
-                classification,
-                index,
-                1,
-                tokens,
-                1,
-            )
-        )
-    return visualize_text(vis_data_records)
-
-def custom_forward(inputs, attention_mask=None, pos=0):
-    result = model2(inputs, attention_mask=attention_mask, return_dict=True)
-    preds = result.logits
-    return preds
-
-def summarize_attributions(attributions):
-    attributions = attributions.sum(dim=-1).squeeze(0)
-    attributions = attributions / torch.norm(attributions)
-    return attributions
-
-
-def run_attribution_model(input_ids, attention_mask, ref_token_id=tokenizer.unk_token_id, layer=None, steps=20):
-    try:
-        output = model2(input_ids=input_ids, attention_mask=attention_mask)[0]
-        index = output.argmax(axis=-1).detach().cpu().numpy()
-
-        ablator = LayerIntegratedGradients(custom_forward, layer)
-        input_tensor = input_ids
-        attention_mask = attention_mask
-        attributions = ablator.attribute(
-            inputs=input_ids,
-            baselines=ref_token_id,
-            additional_forward_args=(attention_mask),
-            target=1,
-            n_steps=steps,
-        )
-        attributions = summarize_attributions(attributions).unsqueeze_(0)
-    finally:
-        pass
-    vis_data_records = []
-    for record in range(input_ids.size(0)):
-        classification = output[record].argmax(dim=-1).item()
-        class_name = classifications[classification]
-        attr = attributions[record]
-        tokens = tokenizer.convert_ids_to_tokens(input_ids[record].flatten())[
-            1 : 0 - ((attention_mask[record] == 0).sum().item() + 1)
-        ]
-        vis_data_records.append(
-            visualization.VisualizationDataRecord(
-                attr,
-                output[record][classification],
-                classification,
-                classification,
-                index,
-                1,
-                tokens,
-                1,
-            )
-        )
-    return visualize_text(vis_data_records)
-
-def sentence_sentiment(input_text, layer):
-    text_batch = [input_text]
-    encoding = tokenizer(text_batch, return_tensors="pt")
-    input_ids = encoding["input_ids"].to(device)
-    attention_mask = encoding["attention_mask"].to(device)
-    layer = int(layer)
-    if layer == 0:
-        layer = model2.roberta.embeddings
-    else:
-        layer = getattr(model2.roberta.encoder.layer, str(layer-1))
-
-    output = run_attribution_model(input_ids, attention_mask, layer=layer)
-    return output
-
-def sentiment_explanation_hila(input_text, layer):
-    text_batch = [input_text]
-    encoding = tokenizer(text_batch, return_tensors="pt")
-    input_ids = encoding["input_ids"].to(device)
-    attention_mask = encoding["attention_mask"].to(device)
-
-    # true class is positive - 1
-    true_class = 1
-
-    return show_explanation(model, input_ids, attention_mask, start_layer=int(layer))
-
-layer_slider = gradio.Slider(minimum=0, maximum=12, value=8, step=1, label="Select layer")
-hila = gradio.Interface(
-    fn=sentiment_explanation_hila,
-    inputs=["text", layer_slider],
-    outputs="html",
-)
-# layer_slider2 = gradio.Slider(minimum=0, maximum=12, value=0, step=1, label="Select IG layer")
-lig = gradio.Interface(
-    fn=sentence_sentiment,
-    inputs=["text", layer_slider],
-    outputs="html",
-)
-
-with open("description.md", "r") as fh:
-    description = fh.read()
+def run(sent, rollout, ig):
+    a = gr_explainer(sent, rollout)
+    b = ig_explainer(sent, ig)
+    return a, b
 
 examples = pandas.read_csv("examples.csv").to_numpy().tolist()
 
-
+with gradio.Blocks(title="Explanations with attention rollout") as iface:
+    util.Markdown(pathlib.Path("description.md"))
+    with gradio.Row(equal_height=True):
+        with gradio.Column(scale=4):
+            sent = gradio.Textbox(label="Input sentence")
+        with gradio.Column(scale=1):
+            but = gradio.Button("Submit")
+    with gradio.Row(equal_height=True):
+        with gradio.Column():
+            rollout_layer = gradio.Slider(minimum=0, maximum=12, value=8, step=1, label="Select rollout start layer")
+            rollout_result = gradio.HTML()
+        with gradio.Column():
+            ig_layer = gradio.Slider(minimum=0, maximum=12, value=8, step=1, label="Select IG layer")
+            ig_result = gradio.HTML()
+    gradio.Examples(examples, [sent])
+    with gradio.Accordion("A note about explainability models"):
+        util.Markdown(pathlib.Path("notice.md"))
+
+    rollout_layer.change(gr_explainer, [sent, rollout_layer], rollout_result)
+    ig_layer.change(ig_explainer, [sent, ig_layer], ig_result)
+    but.click(run, [sent, rollout_layer, ig_layer], [rollout_result, ig_result])
+
 
 iface.launch()
description.md
CHANGED
@@ -1,4 +1,4 @@
-# RoBERTa
+# Attention Rollout -- RoBERTa
 
 In this demo, we use the RoBERTa language model (optimized for masked language modelling and finetuned for sentiment analysis).
 The model predicts for a given sentences whether it expresses a positive, negative or neutral sentiment.
@@ -7,7 +7,7 @@ A range of so-called "attribution methods" have been developed that attempt to d
 they provide a very limited form of "explanation" -- and often disagree -- but sometimes provide good initial hypotheses nevertheless that can be further explored with other methods.
 
 Abnar & Zuidema (2020) proposed a method for Transformers called "Attention Rollout", which was further refined by Chefer et al. (2021) into Gradient-weighted Rollout.
-Here we compare it to another popular method called Integrated
+Here we compare it to another popular method called Integrated Gradients.
 
 * Gradient-weighted attention rollout, as defined by [Hila Chefer](https://github.com/hila-chefer)
 [(Transformer-MM_explainability)](https://github.com/hila-chefer/Transformer-MM-Explainability/), with rollout recursion upto selected layer
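
The description mentions Integrated Gradients without spelling it out. For reference (this is the standard formulation from Sundararajan et al., 2017, not text from this repository), the attribution of input dimension i integrates the gradient of the model output F along the straight path from a baseline x' to the input x, and the demo's IG code approximates the integral with a short Riemann sum (n_steps=20 in lib/integrated_gradients.py) using the tokenizer's unknown token as the baseline:

    \mathrm{IG}_i(x) = (x_i - x'_i) \int_0^1 \frac{\partial F\bigl(x' + \alpha (x - x')\bigr)}{\partial x_i}\, d\alpha
    \;\approx\; (x_i - x'_i)\, \frac{1}{m} \sum_{k=1}^{m} \frac{\partial F\bigl(x' + \tfrac{k}{m}(x - x')\bigr)}{\partial x_i},
    \qquad m = \texttt{n\_steps}.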
lib/gradient_rollout.py
ADDED
@@ -0,0 +1,112 @@
+import torch
+from transformers import AutoTokenizer
+from captum.attr import visualization
+
+from roberta2 import RobertaForSequenceClassification
+from util import visualize_text, PyTMinMaxScalerVectorized
+
+classifications = ["NEGATIVE", "POSITIVE"]
+
+class GradientRolloutExplainer:
+    def __init__(self):
+        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+        self.model = RobertaForSequenceClassification.from_pretrained("textattack/roberta-base-SST-2").to(self.device)
+        self.model.eval()
+        self.tokenizer = AutoTokenizer.from_pretrained("textattack/roberta-base-SST-2")
+
+    def tokens_from_ids(self, ids):
+        return list(map(lambda s: s[1:] if s[0] == "Ġ" else s, self.tokenizer.convert_ids_to_tokens(ids)))
+
+    def run_attribution_model(self, input_ids, attention_mask, index=None, start_layer=0):
+        def avg_heads(cam, grad):
+            cam = (grad * cam).clamp(min=0).mean(dim=-3)
+            # set negative values to 0, then average
+            # cam = cam.clamp(min=0).mean(dim=0)
+            return cam
+
+        def apply_self_attention_rules(R_ss, cam_ss):
+            R_ss_addition = torch.matmul(cam_ss, R_ss)
+            return R_ss_addition
+
+        output = self.model(input_ids=input_ids, attention_mask=attention_mask)[0]
+        if index == None:
+            # index = np.expand_dims(np.arange(input_ids.shape[1])
+            # by default explain the class with the highest score
+            index = output.argmax(axis=-1).detach().cpu().numpy()
+
+        # create a one-hot vector selecting class we want explanations for
+        one_hot = (
+            torch.nn.functional.one_hot(
+                torch.tensor(index, dtype=torch.int64), num_classes=output.size(-1)
+            )
+            .to(torch.float)
+            .requires_grad_(True)
+        ).to(self.device)
+        one_hot = torch.sum(one_hot * output)
+        self.model.zero_grad()
+        # create the gradients for the class we're interested in
+        one_hot.backward(retain_graph=True)
+
+        num_tokens = self.model.roberta.encoder.layer[0].attention.self.get_attn().shape[-1]
+        R = torch.eye(num_tokens).expand(output.size(0), -1, -1).clone().to(self.device)
+
+        for i, blk in enumerate(self.model.roberta.encoder.layer):
+            if i < start_layer:
+                continue
+            grad = blk.attention.self.get_attn_gradients()
+            cam = blk.attention.self.get_attn()
+            cam = avg_heads(cam, grad)
+            joint = apply_self_attention_rules(R, cam)
+            R += joint
+        return output, R[:, 0, 1:-1]
+
+    def build_visualization(self, input_ids, attention_mask, index=None, start_layer=8):
+        # generate an explanation for the input
+        vis_data_records = []
+
+        for index in range(2):
+            output, expl = self.run_attribution_model(
+                input_ids, attention_mask, index=index, start_layer=start_layer
+            )
+            # normalize scores
+            scaler = PyTMinMaxScalerVectorized()
+
+            norm = scaler(expl)
+            # get the model classification
+            output = torch.nn.functional.softmax(output, dim=-1)
+
+            for record in range(input_ids.size(0)):
+                classification = output[record].argmax(dim=-1).item()
+                class_name = classifications[classification]
+                nrm = norm[record]
+
+                # if the classification is negative, higher explanation scores are more negative
+                # flip for visualization
+                #if class_name == "NEGATIVE":
+                if index == 0:
+                    nrm *= -1
+                tokens = self.tokens_from_ids(input_ids[record].flatten())[
+                    1 : 0 - ((attention_mask[record] == 0).sum().item() + 1)
+                ]
+                vis_data_records.append(
+                    visualization.VisualizationDataRecord(
+                        nrm,
+                        output[record][classification],
+                        classification,
+                        classification,
+                        index,
+                        1,
+                        tokens,
+                        1,
+                    )
+                )
+        return visualize_text(vis_data_records)
+
+    def __call__(self, input_text, start_layer=8):
+        text_batch = [input_text]
+        encoding = self.tokenizer(text_batch, return_tensors="pt")
+        input_ids = encoding["input_ids"].to(self.device)
+        attention_mask = encoding["attention_mask"].to(self.device)
+
+        return self.build_visualization(input_ids, attention_mask, start_layer=int(start_layer))
+
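
The two inner helpers in run_attribution_model implement the update rules referenced in the comments ("rule 5" and "rule 6" from Chefer et al.): per layer, the attention map is weighted elementwise by its gradient, negative entries are clipped, the heads are averaged, and the result is multiplied into an accumulating relevance matrix that starts as the identity. Restated as math (my notation, not necessarily the paper's exact one):

    \bar{A}^{(l)} = \mathbb{E}_h\!\left[\bigl(\nabla A^{(l)} \odot A^{(l)}\bigr)^{+}\right],
    \qquad R \leftarrow R + \bar{A}^{(l)} R, \qquad R_{\text{init}} = I

The recursion runs from the slider-selected start layer up to the last encoder layer, and the per-token relevances shown in the demo are the first row of R with the special tokens stripped off (R[:, 0, 1:-1]).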
lib/integrated_gradients.py
ADDED
@@ -0,0 +1,90 @@
+import torch
+
+from transformers import AutoModelForSequenceClassification
+from transformers import AutoTokenizer
+
+from captum.attr import LayerIntegratedGradients
+from captum.attr import visualization
+
+from util import visualize_text
+
+classifications = ["NEGATIVE", "POSITIVE"]
+
+class IntegratedGradientsExplainer:
+    def __init__(self):
+        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+        self.model = AutoModelForSequenceClassification.from_pretrained("textattack/roberta-base-SST-2").to(self.device)
+        self.tokenizer = AutoTokenizer.from_pretrained("textattack/roberta-base-SST-2")
+        self.ref_token_id = self.tokenizer.unk_token_id
+
+    def tokens_from_ids(self, ids):
+        return list(map(lambda s: s[1:] if s[0] == "Ġ" else s, self.tokenizer.convert_ids_to_tokens(ids)))
+
+    def custom_forward(self, inputs, attention_mask=None, pos=0):
+        result = self.model(inputs, attention_mask=attention_mask, return_dict=True)
+        preds = result.logits
+        return preds
+
+    @staticmethod
+    def summarize_attributions(attributions):
+        attributions = attributions.sum(dim=-1).squeeze(0)
+        attributions = attributions / torch.norm(attributions)
+        return attributions
+
+
+    def run_attribution_model(self, input_ids, attention_mask, index=None, layer=None, steps=20):
+        try:
+            output = self.model(input_ids=input_ids, attention_mask=attention_mask)[0]
+            if index is None:
+                index = output.argmax(axis=-1).item()
+
+            ablator = LayerIntegratedGradients(self.custom_forward, layer)
+            input_tensor = input_ids
+            attention_mask = attention_mask
+            attributions = ablator.attribute(
+                inputs=input_ids,
+                baselines=self.ref_token_id,
+                additional_forward_args=(attention_mask),
+                target=index,
+                n_steps=steps,
+            )
+            return self.summarize_attributions(attributions).unsqueeze_(0), output, index
+        finally:
+            pass
+
+    def build_visualization(self, input_ids, attention_mask, **kwargs):
+        vis_data_records = []
+        attributions, output, index = self.run_attribution_model(input_ids, attention_mask, **kwargs)
+        for record in range(input_ids.size(0)):
+            classification = output[record].argmax(dim=-1).item()
+            class_name = classifications[classification]
+            attr = attributions[record]
+            tokens = self.tokens_from_ids(input_ids[record].flatten())[
+                1 : 0 - ((attention_mask[record] == 0).sum().item() + 1)
+            ]
+            vis_data_records.append(
+                visualization.VisualizationDataRecord(
+                    attr,
+                    output[record][classification],
+                    classification,
+                    classification,
+                    index,
+                    1,
+                    tokens,
+                    1,
+                )
+            )
+        return visualize_text(vis_data_records)
+
+    def __call__(self, input_text, layer):
+        text_batch = [input_text]
+        encoding = self.tokenizer(text_batch, return_tensors="pt")
+        input_ids = encoding["input_ids"].to(self.device)
+        attention_mask = encoding["attention_mask"].to(self.device)
+        layer = int(layer)
+        if layer == 0:
+            layer = self.model.roberta.embeddings
+        else:
+            layer = getattr(self.model.roberta.encoder.layer, str(layer-1))
+
+        return self.build_visualization(input_ids, attention_mask, layer=layer)
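
Both explainer classes are plain callables that take a raw sentence and a layer value and return a block of HTML, which is why app.py can hand them directly to the Gradio event handlers. A hedged usage sketch (the sentence is made up; a slider value of 0 selects the embedding layer for IG, 1-12 the corresponding encoder layer):

    from gradient_rollout import GradientRolloutExplainer
    from integrated_gradients import IntegratedGradientsExplainer

    gr_explainer = GradientRolloutExplainer()
    ig_explainer = IntegratedGradientsExplainer()

    sentence = "A deliciously entertaining film."   # illustrative input
    rollout_html = gr_explainer(sentence, 8)        # rollout recursion starting at layer 8
    ig_html = ig_explainer(sentence, 8)             # IG attributions w.r.t. encoder layer 8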
lib/util.py
ADDED
@@ -0,0 +1,86 @@
+import io  # needed for the io.TextIOWrapper check below
+import pathlib
+import gradio
+from captum.attr import visualization
+
+class Markdown(gradio.Markdown):
+    def __init__(self, value, *args, **kwargs):
+        if isinstance(value, pathlib.Path):
+            value = value.read_text()
+        elif isinstance(value, io.TextIOWrapper):
+            value = value.read()
+        super().__init__(value, *args, **kwargs)
+
+# from https://discuss.pytorch.org/t/using-scikit-learns-scalers-for-torchvision/53455
+class PyTMinMaxScalerVectorized(object):
+    """
+    Transforms each channel to the range [0, 1].
+    """
+
+    def __init__(self, dimension=-1):
+        self.d = dimension
+
+    def __call__(self, tensor):
+        d = self.d
+        scale = 1.0 / (
+            tensor.max(dim=d, keepdim=True)[0] - tensor.min(dim=d, keepdim=True)[0]
+        )
+        tensor.mul_(scale).sub_(tensor.min(dim=d, keepdim=True)[0])
+        return tensor
+
+# copied out of captum because we need raw html instead of a jupyter widget
+def visualize_text(datarecords, legend=True):
+    dom = ["<table width: 100%>"]
+    rows = [
+        "<tr><th>True Label</th>"
+        "<th>Predicted Label</th>"
+        "<th>Attribution Label</th>"
+        "<th>Attribution Score</th>"
+        "<th>Word Importance</th>"
+    ]
+    for datarecord in datarecords:
+        rows.append(
+            "".join(
+                [
+                    "<tr>",
+                    visualization.format_classname(datarecord.true_class),
+                    visualization.format_classname(
+                        "{0} ({1:.2f})".format(
+                            datarecord.pred_class, datarecord.pred_prob
+                        )
+                    ),
+                    visualization.format_classname(datarecord.attr_class),
+                    visualization.format_classname(
+                        "{0:.2f}".format(datarecord.attr_score)
+                    ),
+                    visualization.format_word_importances(
+                        datarecord.raw_input_ids, datarecord.word_attributions
+                    ),
+                    "<tr>",
+                ]
+            )
+        )
+
+    if legend:
+        dom.append(
+            '<div style="border-top: 1px solid; margin-top: 5px; \
+            padding-top: 5px; display: inline-block">'
+        )
+        dom.append("<b>Legend: </b>")
+
+        for value, label in zip([-1, 0, 1], ["Negative", "Neutral", "Positive"]):
+            dom.append(
+                '<span style="display: inline-block; width: 10px; height: 10px; \
+                border: 1px solid; background-color: \
+                {value}"></span> {label} '.format(
+                    value=visualization._get_color(value), label=label
+                )
+            )
+        dom.append("</div>")
+
+    dom.append("".join(rows))
+    dom.append("</table>")
+    html = "".join(dom)
+
+    return html
+
+
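
PyTMinMaxScalerVectorized rescales scores in place along the chosen dimension so each row of relevances spans [0, 1] before colouring. A quick illustrative check of that behaviour (not part of the repository):

    import torch
    from util import PyTMinMaxScalerVectorized

    scaler = PyTMinMaxScalerVectorized()   # defaults to the last dimension
    t = torch.tensor([[2.0, 4.0, 6.0]])
    print(scaler(t))                       # tensor([[0.0000, 0.5000, 1.0000]]); note t itself is modified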
notice.md
ADDED
@@ -0,0 +1 @@
+[placeholder]