anonymous8 committed
Commit b6bde4a · 1 Parent(s): 4f6b345
update
app.py  CHANGED

@@ -188,18 +188,19 @@ if __name__ == "__main__":
             - The adversarial example and repaired adversarial example may be unnatural to read, while it is because the attackers usually generate unnatural perturbations. RPD does not introduce additional unnatural perturbations.
             - To our best knowledge, Reactive Perturbation Defocusing is a novel approach in adversarial defense. RPD significantly (>10% defense accuracy improvement) outperforms the state-of-the-art methods.
             - The DeepWordBug is an unknown attacker to the adversarial detector and reactive defense module. DeepWordBug has different attacking patterns from other attackers and shows the generalizability and robustness of RPD.
+            - To help the review & evaluation of ACL2023, we will host this demo on a GPU device to speed up the inference process in the next month. Then it will be deployed on a CPU device in the future.
             """)
         gr.Markdown("<h2 align='center'>Natural Example Input</h2>")
         with gr.Group():
             with gr.Row():
                 input_dataset = gr.Radio(
                     choices=["SST2", "AGNews10K", "Amazon"],
-                    value="
+                    value="SST2",
                     label="Select a testing dataset and an adversarial attacker to generate an adversarial example.",
                 )
                 input_attacker = gr.Radio(
                     choices=["BAE", "PWWS", "TextFooler", "DeepWordBug"],
-                    value="
+                    value="PWWS",
                     label="Choose an Adversarial Attacker for generating an adversarial example to attack the model.",
                 )
         with gr.Group():
@@ -213,7 +214,7 @@ if __name__ == "__main__":
             )
 
         button_gen = gr.Button(
-            "Generate an adversarial example to repair using RPD (
+            "Generate an adversarial example to repair using RPD (GPU: < 1 minute, CPU: 1-10 minutes)",
            variant="primary",
        )
 
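For readers unfamiliar with the Gradio API these hunks touch: `gr.Radio(choices=..., value=..., label=...)` pre-selects `value` when the page loads, which is what the added `value="SST2"` and `value="PWWS"` lines do. Below is a minimal standalone sketch of that pattern, not the full demo; the `gr.Blocks` layout and shortened labels are assumptions for illustration.

```python
import gradio as gr

# Minimal sketch: a Radio with `value=` starts with that choice selected,
# so the demo opens with SST2 / PWWS as defaults.
with gr.Blocks() as demo:
    input_dataset = gr.Radio(
        choices=["SST2", "AGNews10K", "Amazon"],
        value="SST2",  # default selection
        label="Select a testing dataset.",
    )
    input_attacker = gr.Radio(
        choices=["BAE", "PWWS", "TextFooler", "DeepWordBug"],
        value="PWWS",  # default selection
        label="Choose an adversarial attacker.",
    )
    button_gen = gr.Button(
        "Generate an adversarial example to repair using RPD",
        variant="primary",
    )

if __name__ == "__main__":
    demo.launch()
```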
textattack/constraints/semantics/sentence_encoders/universal_sentence_encoder/multilingual_universal_sentence_encoder.py  CHANGED

@@ -20,14 +20,19 @@ class MultilingualUniversalSentenceEncoder(SentenceEncoder):
         tensorflow_text._load()
         if large:
             tfhub_url = "https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3"
+            mirror_tfhub_url = "https://hub.tensorflow.google.cn/google/universal-sentence-encoder-multilingual-large/3"
         else:
-            tfhub_url =
-
-            )
+            tfhub_url = "https://https://tfhub.dev/google/universal-sentence-encoder-multilingual/3"
+            mirror_tfhub_url = "https://hub.tensorflow.google.cn/google/universal-sentence-encoder-multilingual/3"
 
-        # TODO add QA SET. Details at: https://
+        # TODO add QA SET. Details at: https://hub.tensorflow.google.cn/google/universal-sentence-encoder-multilingual-qa/3
         self._tfhub_url = tfhub_url
-        self.
+        self.mirror_tfhub_url = mirror_tfhub_url
+        try:
+            self.model = hub.load(self._tfhub_url)
+        except Exception as e:
+            print('Error loading model from tfhub, trying mirror url')
+            self.model = hub.load(self.mirror_tfhub_url)
 
     def encode(self, sentences):
         return self.model(sentences).numpy()

@@ -39,4 +44,8 @@ class MultilingualUniversalSentenceEncoder(SentenceEncoder):
 
     def __setstate__(self, state):
         self.__dict__ = state
-
+        try:
+            self.model = hub.load(self._tfhub_url)
+        except Exception as e:
+            print('Error loading model from tfhub, trying mirror url')
+            self.model = hub.load(self.mirror_tfhub_url)
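Both sentence-encoder files apply the same idea: keep the canonical tfhub.dev URL, record a hub.tensorflow.google.cn mirror URL, and fall back to the mirror when the first `hub.load` fails. (The committed else-branch URL above appears to contain a doubled scheme, `"https://https://tfhub.dev/..."`, so the non-large multilingual model would likely always fall through to the mirror.) A minimal sketch of the fallback follows; `load_with_mirror` is a hypothetical helper name, not part of the commit.

```python
import tensorflow_hub as hub  # requires tensorflow and tensorflow_hub


def load_with_mirror(tfhub_url, mirror_tfhub_url):
    """Try the canonical TF Hub URL first; on any failure (e.g. the host
    being unreachable), retry the download from the mirror host."""
    try:
        return hub.load(tfhub_url)
    except Exception:
        print("Error loading model from tfhub, trying mirror url")
        return hub.load(mirror_tfhub_url)


# Example usage (downloads the model on first call):
# model = load_with_mirror(
#     "https://tfhub.dev/google/universal-sentence-encoder-multilingual/3",
#     "https://hub.tensorflow.google.cn/google/universal-sentence-encoder-multilingual/3",
# )
# embeddings = model(["a sentence to encode"]).numpy()
```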
textattack/constraints/semantics/sentence_encoders/universal_sentence_encoder/universal_sentence_encoder.py  CHANGED

@@ -18,22 +18,26 @@ class UniversalSentenceEncoder(SentenceEncoder):
         super().__init__(threshold=threshold, metric=metric, **kwargs)
         if large:
             tfhub_url = "https://tfhub.dev/google/universal-sentence-encoder-large/5"
+            mirror_tfhub_url = "https://hub.tensorflow.google.cn/google/universal-sentence-encoder-large/5"
         else:
-            tfhub_url = "https://tfhub.dev/google/universal-sentence-encoder/
+            tfhub_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
+            mirror_tfhub_url = (
+                "https://hub.tensorflow.google.cn/google/universal-sentence-encoder/4"
+            )
 
         self._tfhub_url = tfhub_url
+        self.mirror_tfhub_url = mirror_tfhub_url
         # Lazily load the model
         self.model = None
 
     def encode(self, sentences):
         if not self.model:
-
-
-
-
-
-
-            return encoding.numpy()
+            try:
+                self.model = hub.load(self._tfhub_url)
+            except Exception as e:
+                print('Error loading model from tfhub, trying mirror url')
+                self.model = hub.load(self.mirror_tfhub_url)
+        return self.model(sentences).numpy()
 
     def __getstate__(self):
         state = self.__dict__.copy()

@@ -42,4 +46,8 @@ class UniversalSentenceEncoder(SentenceEncoder):
 
     def __setstate__(self, state):
         self.__dict__ = state
-
+        try:
+            self.model = hub.load(self._tfhub_url)
+        except Exception as e:
+            print('Error loading model from tfhub, trying mirror url')
+            self.model = hub.load(self.mirror_tfhub_url)
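The `__setstate__` hunks matter for pickling: `__getstate__` copies `self.__dict__` and (presumably, in the surrounding TextAttack code) drops the live TF Hub handle, which cannot be pickled, so a constraint restored from a pickle has to reload its model. A self-contained illustration of that reload-on-unpickle pattern follows; `LazyModelHolder` is a hypothetical class and the string-returning `_load` is a stand-in for the `hub.load` fallback shown earlier.

```python
import pickle


class LazyModelHolder:
    """Hypothetical illustration: drop the heavy model before pickling,
    rebuild it when the object is restored."""

    def __init__(self, tfhub_url, mirror_tfhub_url):
        self._tfhub_url = tfhub_url
        self.mirror_tfhub_url = mirror_tfhub_url
        self.model = self._load()

    def _load(self):
        # Stand-in for hub.load(...) with the mirror fallback.
        return f"model loaded from {self._tfhub_url}"

    def __getstate__(self):
        state = self.__dict__.copy()
        state["model"] = None  # the real handle is not picklable
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self.model = self._load()  # reload on unpickle, as in the commit


restored = pickle.loads(pickle.dumps(LazyModelHolder("primary-url", "mirror-url")))
print(restored.model)  # -> "model loaded from primary-url"
```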
utils.py  CHANGED

@@ -99,7 +99,7 @@ def get_sst2_example():
         label = int(label.strip())
         data.append((text, label))
         label_set.add(label)
-    return
+    return random.choice(data)
 
 
 def get_agnews_example():
@@ -142,7 +142,7 @@ def get_agnews_example():
         label = int(label.strip())
         data.append((text, label))
         label_set.add(label)
-    return
+    return random.choice(data)
 
 
 def get_amazon_example():
@@ -186,7 +186,7 @@ def get_amazon_example():
         label = int(label.strip())
         data.append((text, label))
         label_set.add(label)
-    return
+    return random.choice(data)
 
 
 def get_imdb_example():
@@ -230,5 +230,5 @@ def get_imdb_example():
         label = int(label.strip())
         data.append((text, label))
         label_set.add(label)
-    return
+    return random.choice(data)
 
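Each `get_*_example` helper now returns a single random `(text, label)` pair via `random.choice(data)`, which relies on `random` being imported at the top of utils.py. A standalone sketch of the pattern follows; the tab-separated `text<TAB>label` format is an assumption for illustration and may differ from the actual dataset files.

```python
import random


def get_example(lines):
    """Parse "text<TAB>label" lines and return one random (text, label) pair."""
    data = []
    label_set = set()
    for line in lines:
        text, label = line.rsplit("\t", 1)
        label = int(label.strip())
        data.append((text, label))
        label_set.add(label)
    return random.choice(data)


# Example usage with an in-memory stand-in for a dataset file:
print(get_example(["a gripping thriller\t1", "a tedious sequel\t0"]))
```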