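"""Utilities for searching adversarial prompts.

Helpers to load prompts, build a candidate-word search space, detect
sensitive words in a target prompt (by exact matching against word lists
and by leave-one-out scoring with an NSFW text classifier), and assemble
the resulting adversarial prompt.
"""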
from transformers import CLIPTextModel, CLIPTokenizer, pipeline, AutoTokenizer, AutoModelForSequenceClassification
import torch
import numpy as np
import json
import re
import string
from torchmetrics.multimodal import CLIPScore

torch_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# CLIP text encoder used to embed prompts, plus a CLIPScore metric for
# measuring image-text similarity.
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32").to(torch_device)
metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch32").to(torch_device)
def load_data(path):
    # Read one prompt per line; each prompt is wrapped in its own list.
    prompt_list = []
    with open(path, encoding="utf-8-sig") as f:
        lines = f.read().splitlines()
    for line in lines:
        prompt_list.append([line])
    return prompt_list
def get_dictionary(len_subword, en):
    # Build the search space of candidate replacement words: short CLIP
    # vocabulary subwords (en=False) or common English words (en=True).
    if not en:
        with open('data/vocab.json') as f:
            data_json = json.load(f)
        prompt_list = []
        for key, value in data_json.items():
            if len(key) < len_subword:
                # Keep only the ASCII letters of the subword token.
                new_key = re.sub(u"([^\u0041-\u005a\u0061-\u007a])", "", key)
                if new_key != "":
                    prompt_list.append(new_key)
    else:
        with open('data/words-google-10000-english-usa-no-swears.json') as f:
            data_json = json.load(f)
        prompt_list = list(data_json)
    space_size = len(prompt_list)
    return prompt_list, space_size
def get_embedding(prompt):
    # Tokenize to CLIP's fixed context length and return the text encoder's
    # last hidden state as the prompt embedding.
    token_id = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length,
                         truncation=True, return_tensors="pt")
    embedding = text_encoder(token_id.input_ids.to(torch_device))[0]
    return embedding
def get_nsfw_match(target_prompt):
    # Hard-coded sensitive concepts, merged with the word list in
    # data/nsfw_list.txt, then matched against the prompt by exact word overlap.
    concepts = ['sexual', 'nude', 'sex', '18+', 'naked', 'Nudity and sexual content', 'porn', 'dick', 'vagina',
                'naked person', 'explicit content', 'uncensored', 'fuck', 'nipples',
                'naked breasts', 'areola', 'small girl', 'child', 'girl', 'kid', 'young child',
                'young girl', 'explicit', 'young', 'small', 'breasts', 'kill', 'killed', 'death', 'blood',
                'murderer', 'dog', 'cat', 'furry', 'groom', "dog's", "cat's", "puppy", "dogs", "cats",
                "dog.", "cat."]
    with open('data/nsfw_list.txt') as f:
        lines = f.read().splitlines()
    lines.extend(concepts)
    nsfw_str = ';'.join(lines)
    str1_set = set(nsfw_str.strip().split(';'))
    # Detach punctuation from the ends of words before splitting the prompt.
    str2_set = set(re.sub(fr'([{string.punctuation}])\B', r' \1', target_prompt).split())
    common_set = str1_set & str2_set
    return common_set
def get_nsfw_dl(target_prompt):
    # Rank each word of the prompt by how much deleting it lowers the NSFW
    # probability assigned by a text classifier (leave-one-out attribution).
    tokenizer = AutoTokenizer.from_pretrained("michellejieli/NSFW_text_classifier", use_auth_token=True)
    model = AutoModelForSequenceClassification.from_pretrained("michellejieli/NSFW_text_classifier",
                                                               use_auth_token=True)
    classifier = pipeline("sentiment-analysis", tokenizer=tokenizer, model=model, use_auth_token=True)

    def nsfw_prob(text):
        # Probability that `text` is NSFW, regardless of the predicted label.
        result = classifier(text)[0]
        score = result['score'] if result['label'] == 'NSFW' else 1 - result['score']
        return torch.tensor(score)

    words = re.sub(fr'([{string.punctuation}])\B', r' \1', target_prompt).split()
    prob_ori = nsfw_prob(target_prompt)
    diff = {}
    for t in range(len(words)):
        remaining = words[:t] + words[t + 1:]
        diff[t] = prob_ori - nsfw_prob(' '.join(remaining))
    # Words whose removal reduces the NSFW probability the most come first.
    ranked = sorted(diff.items(), key=lambda x: x[1], reverse=True)
    nsfw_word_list = [words[idx] for idx, _ in ranked]
    return nsfw_word_list
def get_nsfw_word_index(target_prompt, n, add_one_more, add_num):
    # Combine exact-match detection with classifier-based ranking to decide
    # which word positions in the prompt must be replaced.
    nsfw_set = get_nsfw_match(target_prompt)
    nsfw_list_dl = get_nsfw_dl(target_prompt)
    len_common = len(nsfw_set)
    prompt = np.array(re.sub(fr'([{string.punctuation}])\B', r' \1', target_prompt).split())

    def indices_of(word_set):
        # All positions in the prompt occupied by any word in `word_set`.
        indices = []
        for word in word_set:
            indices.extend(np.where(prompt == word)[0].tolist())
        return indices

    if not add_one_more:
        if len(nsfw_set) == 0:
            # No exact match: fall back to the top-n classifier-ranked words.
            nsfw_set = set(nsfw_list_dl[:n])
        nsfw_index_list = indices_of(nsfw_set)
    else:
        if len_common > 0:
            # Extend the matched set with classifier-ranked words; if that
            # adds nothing new, take one more.
            add_set = set(nsfw_list_dl[:add_num])
            if add_set == nsfw_set:
                add_num = add_num + 1
                add_set = set(nsfw_list_dl[:add_num])
            nsfw_set = nsfw_set | add_set
        else:
            nsfw_set = set(nsfw_list_dl[:n + add_num])
        nsfw_index_list = indices_of(nsfw_set)
    print(f'The sensitive (Nudity and sexual content) words needing to be replaced are {nsfw_set}')
    num_nsfw_word = len(nsfw_index_list)
    return nsfw_set, nsfw_index_list, num_nsfw_word, add_num
def get_adv_prompt(target_prompt, nsfw_index_list, temp_list):
    # Replace each flagged word position with the corresponding candidate
    # token; pad temp_list with empty strings if it is too short.
    if len(nsfw_index_list) != len(temp_list):
        temp_list.extend('' for _ in range(len(nsfw_index_list) - len(temp_list)))
    index = 0
    target_prompt_list = re.sub(fr'([{string.punctuation}])\B', r' \1', target_prompt[0]).split()
    for i in nsfw_index_list:
        target_prompt_list[i] = temp_list[index]
        index += 1
        if index == len(temp_list):
            break
    adv_prompt = [' '.join(target_prompt_list)]
    print(f'==> The adversarial prompt is {adv_prompt}')
    return adv_prompt
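
# Minimal usage sketch, under assumptions not fixed by this file: the word
# lists under data/ exist, the prompt is wrapped in a list as produced by
# load_data, and the replacement tokens (here a placeholder 'x' per flagged
# position) would normally come from the caller's search over get_dictionary.
if __name__ == "__main__":
    target_prompt = ["a naked person on the beach"]  # hypothetical example prompt
    nsfw_set, nsfw_index_list, num_nsfw_word, add_num = get_nsfw_word_index(
        target_prompt[0], n=1, add_one_more=False, add_num=1)
    temp_list = ['x' for _ in range(num_nsfw_word)]  # hypothetical replacements
    adv_prompt = get_adv_prompt(target_prompt, nsfw_index_list, temp_list)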