import csv
import json
from math import floor, sqrt

import numpy as np
import textattack
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from textattack.shared import AttackedText

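# Clusters the adversarial samples of each original sentence and keeps a diverse
# subset: per-word embeddings of the perturbed positions are grouped with KMeans
# and one representative sample is selected from every cluster.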
class Clustering:
    def __init__(self, file_, victim_model_wrapper, victim_model, attack):
        self.file = file_
        self.victim_model_wrapper = victim_model_wrapper
        self.victim_model = victim_model
        self.attack = attack

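    # Runs `text_input` through the victim model's input-embedding layer and
    # returns the resulting (1, max_length, hidden_dim) tensor of token embeddings.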
    def get_embedding_layer(self, model, text_input):
        if isinstance(model, textattack.models.helpers.T5ForTextToText):
            raise NotImplementedError(
                "`get_grads` for T5ForTextToText has not been implemented yet."
            )

        model.train()
        embedding_layer = model.get_input_embeddings()
        embedding_layer.weight.requires_grad = True

        model.zero_grad()
        model_device = next(model.parameters()).device
        # The original snippet referenced an undefined `tokenizer`; we assume the
        # model wrapper exposes one (true for TextAttack's HuggingFaceModelWrapper).
        tokenizer = self.victim_model_wrapper.tokenizer
        input_dict = tokenizer(
            [text_input],
            add_special_tokens=True,
            return_tensors="pt",
            padding="max_length",
            truncation=True,
        )
        input_dict = input_dict.to(model_device)
        embedding = embedding_layer(input_dict["input_ids"])

        return embedding

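    # Expects `self.file` to be JSON with a top-level "data" list; every item holds
    # the "original" sentence and its adversarial "samples", each with an
    # "attacked_text" and a "score". For every sample this builds one embedding
    # vector per orderable word (the mean of its sub-token embeddings) and a
    # binary mask marking the words that differ from the original sentence.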
    def prepare_sentences(self):
        file = self.file
        victim_model = self.victim_model
        victim_model_wrapper = self.victim_model_wrapper
        attack = self.attack

        with open(file, "r") as f:
            data = json.load(f)

        global_sentences = []
        global_masks = []
        global_scores = []
        for item in data["data"]:
            original_words = item["original"].split()

            sentences = []
            masks = []
            scores = []
            _, indices_to_order = attack.get_indices_to_order(
                AttackedText(item["original"])
            )
            for sample in item["samples"]:
                scores.append(sample["score"])
                attacked_text = AttackedText(sample["attacked_text"])
                word2token_mapping_0 = attacked_text.align_with_model_tokens(
                    victim_model_wrapper
                )
                embedding_0 = self.get_embedding_layer(
                    model=victim_model, text_input=sample["attacked_text"]
                )
                embedding_vectors_0 = embedding_0[0].detach().cpu().numpy()

                sentence_embedding = []
                mask = []
                for idx in indices_to_order:
                    # Average the sub-token embeddings belonging to this word.
                    matched_tokens_0 = word2token_mapping_0[idx]
                    embedding_from_layer = np.mean(
                        embedding_vectors_0[matched_tokens_0], axis=0
                    )
                    sentence_embedding.append(embedding_from_layer)
                    # 1 marks a word changed by the attack, 0 an unchanged word.
                    mask.append(
                        1 if original_words[idx] != attacked_text.words[idx] else 0
                    )
                sentences.append(sentence_embedding)
                masks.append(mask)
            global_sentences.append(sentences)
            global_masks.append(masks)
            global_scores.append(scores)
        return global_sentences, global_masks, global_scores

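    # The unified mask is the element-wise OR of all per-sample masks of one
    # original sentence: a position is kept if any sample perturbed it.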
    def get_unified_mask(self, masks):
        unified_mask = np.zeros_like(masks[0])
        for mask in masks:
            unified_mask = np.logical_or(unified_mask, mask)
        return unified_mask.astype(int)

    def get_global_unified_masks(self, masks):
        return [self.get_unified_mask(masks=mask) for mask in masks]

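    # Zeroes out the embedding of every word outside the unified mask so that the
    # later clustering only compares the perturbed positions.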
    def apply_mask_on_vectors(self, sentences, mask):
        for i in range(len(sentences)):
            sentence = sentences[i]
            sentences[i] = [
                sentence[j] if mask[j] == 1 else np.zeros_like(sentence[j])
                for j in range(len(sentence))
            ]
        return sentences

    def apply_mask_on_global_vectors(self, global_sentences, unified_masks):
        return [
            self.apply_mask_on_vectors(sentences, mask)
            for sentences, mask in zip(global_sentences, unified_masks)
        ]

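    # Flattens each sample's per-word vectors into one row and stacks the samples
    # into an (n_samples, n_words * embedding_dim) matrix suitable for KMeans.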
    def matrix_to_sentences(self, matrix_sentences):
        return np.vstack([np.concatenate(sentence) for sentence in matrix_sentences])

    def global_matrix_to_global_sentences(self, global_matrix_sentences):
        return [
            self.matrix_to_sentences(sentences) for sentences in global_matrix_sentences
        ]

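    # Picks the number of clusters either by maximising the silhouette score over
    # k = 2..max_clusters, by the floor(sqrt(n / 2)) + 1 rule of thumb, or with a
    # fixed "custom" k, and returns (cluster centroids, cluster labels).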
    def find_best_clustering(self, sentences, max_clusters, method="silhouette"):
        if method == "silhouette":
            max_silhouette_avg = -1
            final_cluster_labels = None
            final_cluster_centers = None

            # silhouette_score is undefined for a single cluster, so start at k = 2.
            for num_clusters in range(2, max_clusters + 1):
                kmeans = KMeans(n_clusters=num_clusters).fit(sentences)
                cluster_labels = kmeans.labels_
                silhouette_avg = silhouette_score(sentences, cluster_labels)

                # Keep the clustering with the best silhouette score seen so far.
                if silhouette_avg > max_silhouette_avg:
                    max_silhouette_avg = silhouette_avg
                    final_cluster_labels = cluster_labels
                    final_cluster_centers = kmeans.cluster_centers_

            return final_cluster_centers, final_cluster_labels
        elif method == "thumb-rule":
            best_k = floor(sqrt(len(sentences) / 2)) + 1
            kmeans = KMeans(n_clusters=best_k).fit(sentences)
            return kmeans.cluster_centers_, kmeans.labels_
        elif method == "custom":
            best_k = 5
            kmeans = KMeans(n_clusters=best_k).fit(sentences)
            return kmeans.cluster_centers_, kmeans.labels_

    def find_global_best_clustering(
        self, global_sentences, max_clusters_per_group, method
    ):
        return [
            self.find_best_clustering(
                sentences,
                min(len(sentences) - 1, max_clusters_per_group),
                method=method,
            )
            for sentences in global_sentences
        ]

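    # Euclidean distance of every sample to the centroid of its assigned cluster.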
    def get_global_distances(self, sentences, global_clustering):
        global_distances = []
        for X, clustering in zip(sentences, global_clustering):
            centroids = clustering[0]
            labels = clustering[1]
            global_distances.append(
                [
                    np.sqrt(np.sum((X[i] - centroids[labels[i]]) ** 2))
                    for i in range(len(X))
                ]
            )
        return global_distances

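    # Per cluster, keeps the sample with the smallest score-to-distance ratio,
    # where the distance is the sample's normalised distance to its centroid.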
    def select_diverce_samples(self, scores, distances, clustering):
        scores_ = np.array(scores)
        distances_ = np.array(distances)
        labels_ = np.array(clustering)
        selected_samples = []

        # Normalise the distances so they sum to one before forming the ratio.
        normalized_distances = distances_ / np.sum(distances_)

        finalscores = scores_ / normalized_distances

        clusters = np.unique(labels_)

        for cluster in clusters:
            indices = np.where(labels_ == cluster)[0]
            cluster_finalscores = finalscores[indices]
            # One representative per cluster: the sample with the lowest ratio.
            best_sample_index = indices[np.argmin(cluster_finalscores)]
            selected_samples.append(best_sample_index)
        return selected_samples

    def global_select_diverce_sample(self, global_scores, sentences, global_clustering):
        global_distances = self.get_global_distances(sentences, global_clustering)
        labels_ = [X[1] for X in global_clustering]

        return [
            self.select_diverce_samples(scores, distances, clustering)
            for scores, distances, clustering in zip(
                global_scores, global_distances, labels_
            )
        ]

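    # save_json rewrites the input file's structure with each item's "samples"
    # restricted to the selected indices; save_csv appends one
    # (attacked_text, ground_truth_output) row per selected sample to a training CSV.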
    def save_json(self, selected_samples, output):
        with open(self.file) as f:
            data = json.load(f)

        selected_data = []

        for item, indices in zip(data["data"], selected_samples):
            new_item = item.copy()
            new_item["samples"] = [item["samples"][i] for i in indices]
            selected_data.append(new_item)

        with open(output, "w") as f:
            json.dump({"data": selected_data}, f)

    def save_csv(self, selected_samples, ground_truth_output, train_file):
        with open(self.file) as f:
            data = json.load(f)["data"]

        with open(train_file, "a", newline="") as f:
            writer = csv.writer(f)

            for item, indices in zip(data, selected_samples):
                samples = [item["samples"][i] for i in indices]

                for sample in samples:
                    # Write the perturbed text (not the whole sample dict) together
                    # with its ground-truth label.
                    row = [sample["attacked_text"], ground_truth_output]
                    writer.writerow(row)
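

# A minimal sketch of how these pieces might be wired together. The file names,
# model checkpoint, attack recipe, and cluster budget below are placeholders and
# assumptions for illustration, not part of the original pipeline.
if __name__ == "__main__":
    import transformers

    checkpoint = "textattack/bert-base-uncased-imdb"  # assumed victim model
    model = transformers.AutoModelForSequenceClassification.from_pretrained(checkpoint)
    tokenizer = transformers.AutoTokenizer.from_pretrained(checkpoint)
    wrapper = textattack.models.wrappers.HuggingFaceModelWrapper(model, tokenizer)
    attack = textattack.attack_recipes.TextFoolerJin2019.build(wrapper)

    clustering = Clustering("samples.json", wrapper, model, attack)

    # 1. Per-word embeddings, perturbation masks, and attack scores per sample.
    sentences, masks, scores = clustering.prepare_sentences()
    # 2. Keep only positions perturbed in at least one sample, then flatten.
    unified_masks = clustering.get_global_unified_masks(masks)
    masked = clustering.apply_mask_on_global_vectors(sentences, unified_masks)
    matrices = clustering.global_matrix_to_global_sentences(masked)
    # 3. Cluster each sentence's samples and pick one representative per cluster.
    clusterings = clustering.find_global_best_clustering(
        matrices, max_clusters_per_group=10, method="silhouette"
    )
    selected = clustering.global_select_diverce_sample(scores, matrices, clusterings)
    # 4. Persist the selection.
    clustering.save_json(selected, "selected_samples.json")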