{ "cells": [ { "cell_type": "markdown", "id": "bb8a383e-7d0c-454d-8764-4e3d975b2187", "metadata": {}, "source": [ "# Experiment List:\n", "1. Baseline Performance of Neural Models\n", "2. Wisdom of The Crowd Experiment\n", "3. Using \"Polarization\" as a Feature for Toxicity Detection\n", "4. Incorporating Demographic Information\n", "5. Combining Polarization and Demographic Information for Toxicity Detection" ] }, { "cell_type": "markdown", "id": "df99292a-e96d-4d56-b16f-c8fa1a2fd2ea", "metadata": {}, "source": [ "## 1. Baseline Performance of Neural Models" ] }, { "cell_type": "markdown", "id": "8a1ca696-e05c-4c37-a8c9-d99095e4c2d8", "metadata": {}, "source": [ "### For IndoBERTweet and NusaBERT" ] }, { "cell_type": "code", "execution_count": null, "id": "8e7edb12-8ae7-4c6a-9201-3a2a76890222", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import ast\n", "import os\n", "import numpy as np\n", "from sklearn.model_selection import StratifiedKFold\n", "from transformers import Trainer, TrainingArguments, BertForSequenceClassification, BertTokenizer\n", "import torch\n", "\n", "from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, average_precision_score\n", "\n", "def compute_metrics(pred):\n", " labels = pred.label_ids\n", " preds = pred.predictions.argmax(-1)\n", "\n", " # Accuracy\n", " accuracy = accuracy_score(labels, preds)\n", "\n", " # Macro F1, Precision, and Recall\n", " macro_f1 = f1_score(labels, preds, average='macro')\n", " precision = precision_score(labels, preds, average='macro')\n", " recall = recall_score(labels, preds, average='macro')\n", "\n", " # Class-1 only metrics (positive class)\n", " precision_class_1 = precision_score(labels, preds, pos_label=1)\n", " recall_class_1 = recall_score(labels, preds, pos_label=1)\n", " f1_class_1 = f1_score(labels, preds, pos_label=1)\n", "\n", " # Class-0 only metrics (negative class)\n", " precision_class_0 = precision_score(labels, preds, pos_label=0)\n", " recall_class_0 = recall_score(labels, preds, pos_label=0)\n", " f1_class_0 = f1_score(labels, preds, pos_label=0)\n", "\n", " # ROC-AUC score for binary classification\n", " try:\n", " # Compute the ROC AUC score for binary classification directly\n", " roc_auc = roc_auc_score(labels, preds)\n", " except ValueError:\n", " # In case there's an issue with the labels or predictions (e.g., all labels are the same)\n", " roc_auc = 0.5 # This would represent random classification if AUC can't be computed\n", "\n", " # Precision-Recall AUC\n", " precision_recall_auc = average_precision_score(labels, preds)\n", "\n", " return {\n", " 'accuracy': accuracy,\n", " 'macro_f1': macro_f1,\n", " 'precision': precision,\n", " 'recall': recall,\n", " 'precision_class_1': precision_class_1,\n", " 'recall_class_1': recall_class_1,\n", " 'f1_class_1': f1_class_1,\n", " 'precision_class_0': precision_class_0,\n", " 'recall_class_0': recall_class_0,\n", " 'f1_class_0': f1_class_0,\n", " 'roc_auc': roc_auc,\n", " 'precision_recall_auc': precision_recall_auc,\n", " }\n", "\n", "def wisdom_text_handler(merged_df):\n", " texts = merged_df['text'].tolist()\n", " labels = merged_df['label'].tolist()\n", " annot_counts = merged_df['annotator_count'].astype(int).tolist()\n", " return texts, labels, annot_counts\n", "\n", "def wisdom_any_text_handler(merged_df):\n", " texts = merged_df['text'].tolist()\n", " merged_df['polarized'] = merged_df['polarized'].apply(lambda x: [int(y) for y in ast.literal_eval(x)])\n", " merged_df['polarized_value'] = merged_df['polarized'].apply(lambda x: sum(x)/len(x))\n", " merged_df['any_label'] = merged_df['polarized_value'].apply(lambda x: 1 if x > 0 else 0) \n", " any_label = merged_df['any_label'].tolist()\n", " labels = merged_df['label'].tolist()\n", " annot_counts = merged_df['annotator_count'].astype(int).tolist()\n", " return texts, labels, annot_counts, any_label\n", "\n", "def train_wisdom_bert_pipeline(model_path: str, merged_df, output_dir: str, approach: str = \"single\"):\n", "\n", " # Handle texts and labels:\n", " texts, labels, annot_counts = wisdom_text_handler(merged_df)\n", "\n", " # Create output directory\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n", " metrics_list = []\n", "\n", " for fold, (train_index, test_index) in enumerate(skf.split(texts, labels)):\n", " train_annot_counts = np.array(annot_counts)[train_index]\n", " train_texts, test_texts = np.array(texts)[train_index], np.array(texts)[test_index]\n", " train_labels, test_labels = np.array(labels)[train_index], np.array(labels)[test_index]\n", " target_annot_count = 1\n", " if approach == \"single\":\n", " train_indices = np.where(train_annot_counts == target_annot_count)[0]\n", " train_texts = np.array(train_texts)[train_indices]\n", " train_labels = np.array(train_labels)[train_indices]\n", " elif approach == \"more\":\n", " train_indices = np.where(train_annot_counts != target_annot_count)[0]\n", " train_texts = np.array(train_texts)[train_indices]\n", " train_labels = np.array(train_labels)[train_indices]\n", " else:\n", " raise ValueError(f\"Invalid approach: {approach}\")\n", " # Tokenize\n", " tokenizer = BertTokenizer.from_pretrained(model_path)\n", " train_encodings = tokenizer(train_texts.tolist(), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", " test_encodings = tokenizer(test_texts.tolist(), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", "\n", " train_dataset = Dataset(train_encodings, train_labels)\n", " test_dataset = Dataset(test_encodings, test_labels)\n", "\n", " model = BertForSequenceClassification.from_pretrained(model_path, num_labels=len(set(labels)))\n", "\n", " training_args = TrainingArguments(\n", " output_dir=os.path.join(output_dir, f\"model_fold_{fold}\"),\n", " evaluation_strategy=\"epoch\",\n", " per_device_train_batch_size=16,\n", " per_device_eval_batch_size=64,\n", " num_train_epochs=3,\n", " logging_dir=os.path.join(output_dir, f\"logs_fold_{fold}\"),\n", " )\n", "\n", " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", " train_dataset=train_dataset,\n", " eval_dataset=test_dataset,\n", " compute_metrics=compute_metrics,\n", " )\n", "\n", " # Train and evaluate\n", " trainer.train()\n", " metrics = trainer.evaluate()\n", " metrics_list.append(metrics)\n", "\n", " # Save model\n", " model.save_pretrained(os.path.join(output_dir, f\"model_fold_{fold}\"))\n", " tokenizer.save_pretrained(os.path.join(output_dir, f\"model_fold_{fold}\"))\n", "\n", " # Save performance report\n", " pd.DataFrame([metrics]).to_csv(os.path.join(output_dir, f\"performance_fold_{fold}.csv\"), index=False)\n", "\n", " # Calculate average performance metrics\n", " avg_metrics = {metric: np.mean([m[metric] for m in metrics_list]) for metric in metrics_list[0]}\n", " pd.DataFrame([avg_metrics]).to_csv(os.path.join(output_dir, \"average_performance.csv\"), index=False)\n", "\n", "# Dataset class to handle encoding\n", "class Dataset(torch.utils.data.Dataset):\n", " def __init__(self, encodings, labels):\n", " self.encodings = encodings\n", " self.labels = [int(label) for label in labels] \n", "\n", " def __getitem__(self, idx):\n", " item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n", " item['labels'] = torch.tensor(self.labels[idx])\n", " return item\n", "\n", " def __len__(self):\n", " return len(self.labels)" ] }, { "cell_type": "markdown", "id": "511649ab-4c9f-4513-a61a-da480bec3f07", "metadata": {}, "source": [ "### For Multi-e5" ] }, { "cell_type": "code", "execution_count": null, "id": "3a224add-ffe1-4f29-843b-92bea15aa07e", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import ast\n", "import os\n", "import numpy as np\n", "import torch\n", "from sklearn.model_selection import StratifiedKFold\n", "from transformers import Trainer, TrainingArguments, XLMRobertaForSequenceClassification, XLMRobertaTokenizer\n", "\n", "def baseline_text_handler(merged_df):\n", " texts = merged_df['text'].tolist()\n", " labels = merged_df['label'].tolist()\n", " return texts, labels\n", "\n", "def train_baseline_XLMRoberta_pipeline(model_path: str, merged_df, output_dir: str):\n", "\n", " # Handle texts and labels:\n", " texts, labels = baseline_text_handler(merged_df)\n", "\n", " # Create output directory\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n", " metrics_list = []\n", "\n", " for fold, (train_index, test_index) in enumerate(skf.split(texts, labels)):\n", " train_texts, test_texts = np.array(texts)[train_index], np.array(texts)[test_index]\n", " train_labels, test_labels = np.array(labels)[train_index], np.array(labels)[test_index]\n", "\n", " # Tokenize\n", " tokenizer = XLMRobertaTokenizer.from_pretrained(model_path)\n", " train_encodings = tokenizer(train_texts.tolist(), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", " test_encodings = tokenizer(test_texts.tolist(), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", "\n", " train_dataset = Dataset(train_encodings, train_labels)\n", " test_dataset = Dataset(test_encodings, test_labels)\n", "\n", " model = XLMRobertaForSequenceClassification.from_pretrained(model_path, num_labels=len(set(labels)))\n", "\n", " training_args = TrainingArguments(\n", " output_dir=os.path.join(output_dir, f\"model_fold_{fold}\"),\n", " evaluation_strategy=\"epoch\",\n", " per_device_train_batch_size=16,\n", " per_device_eval_batch_size=64,\n", " num_train_epochs=3,\n", " logging_dir=os.path.join(output_dir, f\"logs_fold_{fold}\"),\n", " )\n", "\n", " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", " train_dataset=train_dataset,\n", " eval_dataset=test_dataset,\n", " compute_metrics=compute_metrics,\n", " )\n", "\n", " # Train and evaluate\n", " trainer.train()\n", " metrics = trainer.evaluate()\n", " metrics_list.append(metrics)\n", "\n", " # Save model\n", " model.save_pretrained(os.path.join(output_dir, f\"model_fold_{fold}\"))\n", " tokenizer.save_pretrained(os.path.join(output_dir, f\"model_fold_{fold}\"))\n", "\n", " # Save performance report\n", " pd.DataFrame([metrics]).to_csv(os.path.join(output_dir, f\"performance_fold_{fold}.csv\"), index=False)\n", "\n", " # Final training on the whole dataset\n", " tokenizer = XLMRobertaTokenizer.from_pretrained(model_path)\n", " encodings = tokenizer(texts, truncation=True, padding=True, max_length=512, return_tensors='pt')\n", " dataset = Dataset(encodings, labels)\n", "\n", " model = XLMRobertaForSequenceClassification.from_pretrained(model_path, num_labels=len(set(labels)))\n", "\n", " training_args = TrainingArguments(\n", " output_dir=os.path.join(output_dir, \"final_model\"),\n", " num_train_epochs=3,\n", " per_device_train_batch_size=16,\n", " )\n", "\n", " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", " train_dataset=dataset,\n", " )\n", "\n", " trainer.train()\n", " model.save_pretrained(os.path.join(output_dir, \"final_model\"))\n", " tokenizer.save_pretrained(os.path.join(output_dir, \"final_model\"))\n", "\n", " # Calculate average performance metrics\n", " avg_metrics = {metric: np.mean([m[metric] for m in metrics_list]) for metric in metrics_list[0]}\n", " pd.DataFrame([avg_metrics]).to_csv(os.path.join(output_dir, \"average_performance.csv\"), index=False)" ] }, { "cell_type": "markdown", "id": "ee0f0020-b398-49bd-8b6e-2760a9c69add", "metadata": {}, "source": [ "## 2. Wisdom of The Crowd Experiment\n", "This experiment consists of two main functions: `train_wisdom_bert_pipeline` and `train_wisdom_bert_normalized_sampling_pipeline`.\n", "1. train_wisdom_bert_pipeline does not maintain label distribution across experiments (i.e., the Single subset has a different ratio of toxic to non-toxic texts compared to the More subset).\n", "2. train_wisdom_bert_normalized_sampling_pipeline applies upsampling to ensure that the Single subset has a distribution of toxic to non-toxic texts that is nearly identical to the More subset." ] }, { "cell_type": "code", "execution_count": null, "id": "b6b41af5-5352-4445-9e11-0489e8f2db93", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import ast\n", "import os\n", "import numpy as np\n", "from sklearn.model_selection import StratifiedKFold\n", "from transformers import Trainer, TrainingArguments, BertForSequenceClassification, BertTokenizer\n", "import torch\n", "\n", "from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, average_precision_score\n", "\n", "def compute_metrics(pred):\n", " labels = pred.label_ids\n", " preds = pred.predictions.argmax(-1)\n", "\n", " # Accuracy\n", " accuracy = accuracy_score(labels, preds)\n", "\n", " # Macro F1, Precision, and Recall\n", " macro_f1 = f1_score(labels, preds, average='macro')\n", " precision = precision_score(labels, preds, average='macro')\n", " recall = recall_score(labels, preds, average='macro')\n", "\n", " # Class-1 only metrics (positive class)\n", " precision_class_1 = precision_score(labels, preds, pos_label=1)\n", " recall_class_1 = recall_score(labels, preds, pos_label=1)\n", " f1_class_1 = f1_score(labels, preds, pos_label=1)\n", "\n", " # Class-0 only metrics (negative class)\n", " precision_class_0 = precision_score(labels, preds, pos_label=0)\n", " recall_class_0 = recall_score(labels, preds, pos_label=0)\n", " f1_class_0 = f1_score(labels, preds, pos_label=0)\n", "\n", " # ROC-AUC score for binary classification\n", " try:\n", " # Compute the ROC AUC score for binary classification directly\n", " roc_auc = roc_auc_score(labels, preds)\n", " except ValueError:\n", " # In case there's an issue with the labels or predictions (e.g., all labels are the same)\n", " roc_auc = 0.5 # This would represent random classification if AUC can't be computed\n", "\n", " # Precision-Recall AUC\n", " precision_recall_auc = average_precision_score(labels, preds)\n", "\n", " return {\n", " 'accuracy': accuracy,\n", " 'macro_f1': macro_f1,\n", " 'precision': precision,\n", " 'recall': recall,\n", " 'precision_class_1': precision_class_1,\n", " 'recall_class_1': recall_class_1,\n", " 'f1_class_1': f1_class_1,\n", " 'precision_class_0': precision_class_0,\n", " 'recall_class_0': recall_class_0,\n", " 'f1_class_0': f1_class_0,\n", " 'roc_auc': roc_auc,\n", " 'precision_recall_auc': precision_recall_auc,\n", " }\n", "\n", "def wisdom_text_handler(merged_df):\n", " texts = merged_df['text'].tolist()\n", " labels = merged_df['label'].tolist()\n", " annot_counts = merged_df['annotator_count'].astype(int).tolist()\n", " return texts, labels, annot_counts\n", "\n", "def wisdom_any_text_handler(merged_df):\n", " texts = merged_df['text'].tolist()\n", " merged_df['polarized'] = merged_df['polarized'].apply(lambda x: [int(y) for y in ast.literal_eval(x)])\n", " merged_df['polarized_value'] = merged_df['polarized'].apply(lambda x: sum(x)/len(x))\n", " merged_df['any_label'] = merged_df['polarized_value'].apply(lambda x: 1 if x > 0 else 0) \n", " any_label = merged_df['any_label'].tolist()\n", " labels = merged_df['label'].tolist()\n", " annot_counts = merged_df['annotator_count'].astype(int).tolist()\n", " return texts, labels, annot_counts, any_label\n", "\n", "def train_wisdom_bert_pipeline(model_path: str, merged_df, output_dir: str, approach: str = \"single\"):\n", "\n", " # Handle texts and labels:\n", " texts, labels, annot_counts = wisdom_text_handler(merged_df)\n", "\n", " # Create output directory\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n", " metrics_list = []\n", "\n", " for fold, (train_index, test_index) in enumerate(skf.split(texts, labels)):\n", " train_annot_counts = np.array(annot_counts)[train_index]\n", " train_texts, test_texts = np.array(texts)[train_index], np.array(texts)[test_index]\n", " train_labels, test_labels = np.array(labels)[train_index], np.array(labels)[test_index]\n", " target_annot_count = 1\n", " if approach == \"single\":\n", " train_indices = np.where(train_annot_counts == target_annot_count)[0]\n", " train_texts = np.array(train_texts)[train_indices]\n", " train_labels = np.array(train_labels)[train_indices]\n", " elif approach == \"more\":\n", " train_indices = np.where(train_annot_counts != target_annot_count)[0]\n", " train_texts = np.array(train_texts)[train_indices]\n", " train_labels = np.array(train_labels)[train_indices]\n", " else:\n", " raise ValueError(f\"Invalid approach: {approach}\")\n", " # Tokenize\n", " tokenizer = BertTokenizer.from_pretrained(model_path)\n", " train_encodings = tokenizer(train_texts.tolist(), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", " test_encodings = tokenizer(test_texts.tolist(), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", "\n", " train_dataset = Dataset(train_encodings, train_labels)\n", " test_dataset = Dataset(test_encodings, test_labels)\n", "\n", " model = BertForSequenceClassification.from_pretrained(model_path, num_labels=len(set(labels)))\n", "\n", " training_args = TrainingArguments(\n", " output_dir=os.path.join(output_dir, f\"model_fold_{fold}\"),\n", " evaluation_strategy=\"epoch\",\n", " per_device_train_batch_size=16,\n", " per_device_eval_batch_size=64,\n", " num_train_epochs=3,\n", " logging_dir=os.path.join(output_dir, f\"logs_fold_{fold}\"),\n", " )\n", "\n", " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", " train_dataset=train_dataset,\n", " eval_dataset=test_dataset,\n", " compute_metrics=compute_metrics,\n", " )\n", "\n", " # Train and evaluate\n", " trainer.train()\n", " metrics = trainer.evaluate()\n", " metrics_list.append(metrics)\n", "\n", " # Save model\n", " model.save_pretrained(os.path.join(output_dir, f\"model_fold_{fold}\"))\n", " tokenizer.save_pretrained(os.path.join(output_dir, f\"model_fold_{fold}\"))\n", "\n", " # Save performance report\n", " pd.DataFrame([metrics]).to_csv(os.path.join(output_dir, f\"performance_fold_{fold}.csv\"), index=False)\n", "\n", " # Calculate average performance metrics\n", " avg_metrics = {metric: np.mean([m[metric] for m in metrics_list]) for metric in metrics_list[0]}\n", " pd.DataFrame([avg_metrics]).to_csv(os.path.join(output_dir, \"average_performance.csv\"), index=False)\n", "\n", "from sklearn.utils import resample\n", "\n", "def train_wisdom_bert_normalized_sampling_pipeline(model_path: str, merged_df, output_dir: str, approach: str = \"single\"):\n", "\n", " # Handle texts and labels:\n", " texts, labels, annot_counts = wisdom_text_handler(merged_df)\n", " merged_df[\"label\"] = labels\n", " merged_df[\"text\"] = texts\n", " merged_df[\"annotator_count\"] = annot_counts\n", "\n", " # Create output directory\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n", " metrics_list = []\n", "\n", " for fold, (train_index, test_index) in enumerate(skf.split(texts, labels)):\n", " train_df = merged_df.iloc[train_index]\n", " test_df = merged_df.iloc[test_index]\n", "\n", " # Normalize class ratio in train_df\n", " normalized_dfs = []\n", " \n", " if approach == \"single\":\n", " subset = train_df[train_df[\"annotator_count\"] == 1]\n", " elif approach == \"more\":\n", " subset = train_df[train_df[\"annotator_count\"] != 1]\n", " \n", " # Split into classes\n", " class_0 = subset[subset[\"label\"] == 0]\n", " class_1 = subset[subset[\"label\"] == 1]\n", "\n", " # Desired number of class 1 samples to maintain a 1:3 ratio\n", " target_class_1_count = len(class_0) // 3\n", "\n", " # Resample class 1 (upsample or downsample as needed)\n", " if len(class_1) > target_class_1_count:\n", " class_1_resampled = resample(class_1, replace=False, n_samples=target_class_1_count, random_state=42)\n", " else:\n", " class_1_resampled = resample(class_1, replace=True, n_samples=target_class_1_count, random_state=42)\n", "\n", " # Combine resampled class 1 with class 0\n", " normalized_subset = pd.concat([class_0, class_1_resampled])\n", " normalized_dfs.append(normalized_subset)\n", " normalized_train_df = pd.concat(normalized_dfs)\n", " print(normalized_train_df['label'].value_counts())\n", "\n", " # Prepare texts and labels for training\n", " train_texts = normalized_train_df[\"text\"].tolist()\n", " train_labels = normalized_train_df[\"label\"].tolist()\n", "\n", " test_texts = test_df[\"text\"].tolist()\n", " test_labels = test_df[\"label\"].tolist()\n", "\n", " # Tokenize\n", " tokenizer = BertTokenizer.from_pretrained(model_path)\n", " train_encodings = tokenizer(train_texts, truncation=True, padding=True, max_length=512, return_tensors=\"pt\")\n", " test_encodings = tokenizer(test_texts, truncation=True, padding=True, max_length=512, return_tensors=\"pt\")\n", "\n", " train_dataset = Dataset(train_encodings, train_labels)\n", " test_dataset = Dataset(test_encodings, test_labels)\n", "\n", " model = BertForSequenceClassification.from_pretrained(model_path, num_labels=2)\n", "\n", " training_args = TrainingArguments(\n", " output_dir=os.path.join(output_dir, f\"model_fold_{fold}\"),\n", " evaluation_strategy=\"epoch\",\n", " per_device_train_batch_size=16,\n", " per_device_eval_batch_size=64,\n", " num_train_epochs=3,\n", " logging_dir=os.path.join(output_dir, f\"logs_fold_{fold}\"),\n", " )\n", "\n", " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", " train_dataset=train_dataset,\n", " eval_dataset=test_dataset,\n", " compute_metrics=compute_metrics,\n", " )\n", "\n", " # Train and evaluate\n", " trainer.train()\n", " metrics = trainer.evaluate()\n", " metrics_list.append(metrics)\n", "\n", " # Save model\n", " model.save_pretrained(os.path.join(output_dir, f\"model_fold_{fold}\"))\n", " tokenizer.save_pretrained(os.path.join(output_dir, f\"model_fold_{fold}\"))\n", "\n", " # Save performance report\n", " pd.DataFrame([metrics]).to_csv(os.path.join(output_dir, f\"performance_fold_{fold}.csv\"), index=False)\n", "\n", " # Calculate average performance metrics\n", " avg_metrics = {metric: np.mean([m[metric] for m in metrics_list]) for metric in metrics_list[0]}\n", " pd.DataFrame([avg_metrics]).to_csv(os.path.join(output_dir, \"average_performance.csv\"), index=False)\n", "\n", "# Dataset class to handle encoding\n", "class Dataset(torch.utils.data.Dataset):\n", " def __init__(self, encodings, labels):\n", " self.encodings = encodings\n", " self.labels = [int(label) for label in labels] \n", "\n", " def __getitem__(self, idx):\n", " item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n", " item['labels'] = torch.tensor(self.labels[idx])\n", " return item\n", "\n", " def __len__(self):\n", " return len(self.labels)" ] }, { "cell_type": "markdown", "id": "ccd9dae4-279d-436c-9f31-c3326b143ea0", "metadata": {}, "source": [ "## Using \"Polarization\" as a Feature for Toxicity Detection\n", "Main Function:\n", "`train_featural_bert_pipeline_with_polarized_feature`\n", "\n", "Note: \n", "1. For method = `any`, `bin` and `bin-ceil` method are for pre-eliminary experiments. Both of these methods lead to a worse performing model than `agg`.\n", "2. We attempted to use the English translation of the prompt to incorporate the \"Polarization\" feature. However, using the English translation leads to a worsening performance, as IndoBERTweet is mainly trained on the Indonesian language." ] }, { "cell_type": "code", "execution_count": null, "id": "a6742fe8-2346-4295-98b2-6a95c915b497", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import ast\n", "import os\n", "import numpy as np\n", "from sklearn.model_selection import StratifiedKFold\n", "from transformers import Trainer, TrainingArguments, BertForSequenceClassification, BertTokenizer\n", "import torch\n", "\n", "from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, average_precision_score\n", "\n", "def compute_metrics(pred):\n", " labels = pred.label_ids\n", " preds = pred.predictions.argmax(-1)\n", "\n", " # Accuracy\n", " accuracy = accuracy_score(labels, preds)\n", "\n", " # Macro F1, Precision, and Recall\n", " macro_f1 = f1_score(labels, preds, average='macro')\n", " precision = precision_score(labels, preds, average='macro')\n", " recall = recall_score(labels, preds, average='macro')\n", "\n", " # Class-1 only metrics (positive class)\n", " precision_class_1 = precision_score(labels, preds, pos_label=1)\n", " recall_class_1 = recall_score(labels, preds, pos_label=1)\n", " f1_class_1 = f1_score(labels, preds, pos_label=1)\n", "\n", " # Class-0 only metrics (negative class)\n", " precision_class_0 = precision_score(labels, preds, pos_label=0)\n", " recall_class_0 = recall_score(labels, preds, pos_label=0)\n", " f1_class_0 = f1_score(labels, preds, pos_label=0)\n", "\n", " # ROC-AUC score for binary classification\n", " try:\n", " # Compute the ROC AUC score for binary classification directly\n", " roc_auc = roc_auc_score(labels, preds)\n", " except ValueError:\n", " # In case there's an issue with the labels or predictions (e.g., all labels are the same)\n", " roc_auc = 0.5 # This would represent random classification if AUC can't be computed\n", "\n", " # Precision-Recall AUC\n", " precision_recall_auc = average_precision_score(labels, preds)\n", "\n", " return {\n", " 'accuracy': accuracy,\n", " 'macro_f1': macro_f1,\n", " 'precision': precision,\n", " 'recall': recall,\n", " 'precision_class_1': precision_class_1,\n", " 'recall_class_1': recall_class_1,\n", " 'f1_class_1': f1_class_1,\n", " 'precision_class_0': precision_class_0,\n", " 'recall_class_0': recall_class_0,\n", " 'f1_class_0': f1_class_0,\n", " 'roc_auc': roc_auc,\n", " 'precision_recall_auc': precision_recall_auc,\n", " }\n", "\n", "def featural_text_handler(merged_df, method: str = \"agg\", language: str = \"id\"):\n", " \"\"\"\n", " Handles toxic and non-toxic text datasets for toxicity classification, \n", " applying polarization processing and text formatting.\n", "\n", " Method options:\n", " 1. agg = Aggregate value with a range of [0, 1].\n", " 2. bin = Binarized, values of either 0 or 1 (values of 0.5 converted to 0).\n", " 3. bin-ceil = Binarized, but values of 0.5 converted to 1.\n", " 4. any = Binarized, any value above 0 is converted to 1.\n", "\n", " Language options:\n", " 1. id = Indonesian.\n", " 2. en = English.\n", " \"\"\"\n", " def process_polarized_values(row, method):\n", " \"\"\"Processes the polarization values according to the selected method.\"\"\"\n", " values = ast.literal_eval(row['polarized']) if isinstance(row['polarized'], str) else row['polarized']\n", " values = [int(x) for x in values]\n", " if not values:\n", " return 0 # Default for missing or empty polarization\n", " \n", " agg_value = sum(values) / len(values)\n", " if method == \"agg\":\n", " return agg_value\n", " elif method == \"bin\":\n", " return 1 if agg_value > 0.5 else 0\n", " elif method == \"bin-ceil\":\n", " return 1 if agg_value >= 0.5 else 0\n", " elif method == \"any\":\n", " return 1 if agg_value > 0 else 0\n", " else:\n", " raise ValueError(f\"Unsupported method: {method}\")\n", " merged_df['polarized'] = merged_df['polarized'].fillna(0)\n", " merged_df['polarized_value'] = merged_df.apply(lambda row: process_polarized_values(row, method), axis=1)\n", "\n", " # Format text based on language and add polarization\n", " def format_text(row, language):\n", " if language == \"id\":\n", " return f\"Nilai polarisasi rata-rata (rentang 0 hingga 1): {row['polarized_value']} [SEP] {row['text']}\"\n", " elif language == \"en\":\n", " return f\"Average polarization value (range of 0 to 1): {row['polarized_value']} [SEP] {row['text']}\"\n", " else:\n", " raise ValueError(f\"Unsupported language: {language}\")\n", "\n", " merged_df['combined_text'] = merged_df.apply(lambda row: format_text(row, language), axis=1)\n", "\n", " # Prepare outputs\n", " texts = merged_df['combined_text'].tolist()\n", " labels = merged_df['label'].tolist()\n", "\n", " return texts, labels\n", "\n", "\n", "def train_featural_bert_pipeline_with_polarized_feature(model_path: str, merged_df, output_dir: str,\n", " method: str = \"agg\", language: str = \"id\", raw_test: bool = False):\n", " # Handle texts and labels\n", " texts, labels = featural_text_handler(merged_df, method, language)\n", "\n", " # Create output directory\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n", " metrics_list = []\n", "\n", " for fold, (train_index, test_index) in enumerate(skf.split(texts, labels)):\n", " train_texts, test_texts = np.array(texts)[train_index], np.array(texts)[test_index]\n", " if raw_test:\n", " test_texts = [text.split('[SEP]')[-1].strip() for text in test_texts]\n", " train_labels, test_labels = np.array(labels)[train_index], np.array(labels)[test_index]\n", "\n", " # Tokenize\n", " tokenizer = BertTokenizer.from_pretrained(model_path)\n", " train_encodings = tokenizer(list(train_texts), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", " test_encodings = tokenizer(list(test_texts), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", "\n", " train_dataset = Dataset(train_encodings, train_labels)\n", " test_dataset = Dataset(test_encodings, test_labels)\n", "\n", " model = BertForSequenceClassification.from_pretrained(model_path, num_labels=len(set(labels)))\n", "\n", " training_args = TrainingArguments(\n", " output_dir=os.path.join(output_dir, f\"model_fold_{fold}\"),\n", " evaluation_strategy=\"epoch\",\n", " per_device_train_batch_size=16,\n", " per_device_eval_batch_size=64,\n", " num_train_epochs=3,\n", " logging_dir=os.path.join(output_dir, f\"logs_fold_{fold}\"),\n", " save_strategy=\"no\",\n", " )\n", "\n", " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", " train_dataset=train_dataset,\n", " eval_dataset=test_dataset,\n", " compute_metrics=compute_metrics,\n", " )\n", "\n", " # Train and evaluate\n", " trainer.train()\n", " metrics = trainer.evaluate()\n", " metrics_list.append(metrics)\n", "\n", " # Save performance report\n", " pd.DataFrame([metrics]).to_csv(os.path.join(output_dir, f\"performance_fold_{fold}.csv\"), index=False)\n", "\n", " # Calculate average performance metrics\n", " avg_metrics = {metric: np.mean([m[metric] for m in metrics_list]) for metric in metrics_list[0]}\n", " pd.DataFrame([avg_metrics]).to_csv(os.path.join(output_dir, \"average_performance.csv\"), index=False)\n", "\n", "# Dataset class to handle encoding\n", "class Dataset(torch.utils.data.Dataset):\n", " def __init__(self, encodings, labels):\n", " self.encodings = encodings\n", " self.labels = [int(label) for label in labels] \n", "\n", " def __getitem__(self, idx):\n", " item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n", " item['labels'] = torch.tensor(self.labels[idx])\n", " return item\n", "\n", " def __len__(self):\n", " return len(self.labels)\n" ] }, { "cell_type": "markdown", "id": "6f2d9779-56f3-400f-bf08-9085532026ba", "metadata": {}, "source": [ "## 4. Incorporating Demographic Information\n", "Some normalization are required before passing the values to the model.\n", "\n", "\n", "Main function: `exploded_df_train_baseline_bert_pipeline_with_demographic_feature`" ] }, { "cell_type": "code", "execution_count": null, "id": "7d7a591a-fb1c-495e-ba69-cc11d3b2a94c", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import ast\n", "import os\n", "import numpy as np\n", "from typing import List\n", "from sklearn.model_selection import StratifiedKFold\n", "from transformers import Trainer, TrainingArguments, BertForSequenceClassification, BertTokenizer\n", "import torch\n", "\n", "from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, average_precision_score\n", "\n", "def compute_metrics(pred):\n", " labels = pred.label_ids\n", " preds = pred.predictions.argmax(-1)\n", "\n", " # Accuracy\n", " accuracy = accuracy_score(labels, preds)\n", "\n", " # Macro F1, Precision, and Recall\n", " macro_f1 = f1_score(labels, preds, average='macro')\n", " precision = precision_score(labels, preds, average='macro')\n", " recall = recall_score(labels, preds, average='macro')\n", "\n", " # Class-1 only metrics (positive class)\n", " precision_class_1 = precision_score(labels, preds, pos_label=1)\n", " recall_class_1 = recall_score(labels, preds, pos_label=1)\n", " f1_class_1 = f1_score(labels, preds, pos_label=1)\n", "\n", " # Class-0 only metrics (negative class)\n", " precision_class_0 = precision_score(labels, preds, pos_label=0)\n", " recall_class_0 = recall_score(labels, preds, pos_label=0)\n", " f1_class_0 = f1_score(labels, preds, pos_label=0)\n", "\n", " # ROC-AUC score for binary classification\n", " try:\n", " # Compute the ROC AUC score for binary classification directly\n", " roc_auc = roc_auc_score(labels, preds)\n", " except ValueError:\n", " # In case there's an issue with the labels or predictions (e.g., all labels are the same)\n", " roc_auc = 0.5 # This would represent random classification if AUC can't be computed\n", "\n", " # Precision-Recall AUC\n", " precision_recall_auc = average_precision_score(labels, preds)\n", "\n", " return {\n", " 'accuracy': accuracy,\n", " 'macro_f1': macro_f1,\n", " 'precision': precision,\n", " 'recall': recall,\n", " 'precision_class_1': precision_class_1,\n", " 'recall_class_1': recall_class_1,\n", " 'f1_class_1': f1_class_1,\n", " 'precision_class_0': precision_class_0,\n", " 'recall_class_0': recall_class_0,\n", " 'f1_class_0': f1_class_0,\n", " 'roc_auc': roc_auc,\n", " 'precision_recall_auc': precision_recall_auc,\n", " }\n", "\n", "def demographic_text_handler(merged_df):\n", " texts = merged_df['text'].tolist()\n", " labels = merged_df['label'].tolist()\n", " return merged_df, texts, labels\n", "\n", "def single_level_demographic_text_handler(df, demographic: List[str] = [], language: str = \"id\"):\n", " \"\"\"\n", " Handles polar and non-polar text datasets for polarity classification, \n", " applying demographic information\n", "\n", " Language options:\n", " 1. id = Indonesian.\n", " 2. en = English.\n", "\n", " Demographic options, may be a list of strings:\n", " 1. ethnicity\n", " 2. religion\n", " 3. disability\n", " 4. lgbt\n", " 5. gender\n", " 6. age_group\n", " 7. domisili\n", " 8. pendidikan terakhir\n", " 9. status pekerjaan\n", " 10. president vote leaning\n", " \"\"\"\n", "\n", " id_demographic_names = {\n", " 'ethnicity': 'etnisitas',\n", " 'religion' : 'agama',\n", " 'disability': 'disabilitas',\n", " 'lgbt' : 'lgbt',\n", " 'gender': 'gender',\n", " 'age_group': 'generasi',\n", " 'domisili': 'domisili',\n", " 'pendidikan terakhir': 'pendidikan terakhir',\n", " 'status pekerjaan': 'status pekerjaan',\n", " 'president vote leaning': 'pilihan presiden'\n", " }\n", "\n", " en_demographic_names = {\n", " 'ethnicity': 'ethnicity',\n", " 'religion': 'religion',\n", " 'disability': 'disability',\n", " 'lgbt': 'lgbt',\n", " 'gender': 'gender',\n", " 'age_group': 'generation',\n", " 'domisili': 'domicile',\n", " 'pendidikan terakhir': 'last education level',\n", " 'status pekerjaan': 'job status',\n", " 'president vote leaning': 'president vote leaning'\n", " \n", " }\n", "\n", " # preprocess toxic dataset\n", " df['label'] = df['toxicity']\n", " \n", " # Format text based on language and add polarization\n", " def format_text(row, language, demographic):\n", " if language == \"id\":\n", " if len(demographic) == 0:\n", " return \"Informasi Demografis: Tidak tersedia\"\n", " \n", " input_string = \"Informasi Demografis:\\n\"\n", " for demo in demographic:\n", " input_string += f\"{id_demographic_names[demo]}: {row[demo]}\\n\"\n", " input_string = input_string.strip(\"\\n\")\n", " input_string = f\"{input_string} [SEP] {row['text']}\"\n", " return input_string\n", " elif language == \"en\":\n", " if len(demographic) == 0:\n", " return \"Demographic Information: Not available\"\n", "\n", " input_string = \"Demographic Information:\\n\"\n", " for demo in demographic:\n", " input_string += f\"{en_demographic_names[demo]}: {row[demo]}\\n\"\n", " input_string = input_string.strip(\"\\n\")\n", " input_string = f\"{input_string} [SEP] {row['text']}\"\n", " return input_string\n", " else:\n", " raise ValueError(f\"Unsupported language: {language}\")\n", "\n", " df['combined_text'] = df.apply(lambda row: format_text(row, language, demographic), axis=1)\n", " print(df['label'].value_counts())\n", "\n", " # Prepare outputs\n", " texts = df['combined_text'].tolist()\n", " labels = df['label'].tolist()\n", "\n", " return texts, labels\n", "\n", "import pandas as pd\n", "import ast\n", "def process_and_explode(df):\n", " def age_group_f(x):\n", " if 12 <= x <= 29:\n", " return \"Gen Z\"\n", " if 30 <= x <= 44:\n", " return \"Millenials\"\n", " if 45 <= x <= 59:\n", " return \"Gen X\"\n", " \n", " def president_vote_f(x):\n", " if x == \"1\":\n", " return \"Anies Rasyid Baswedan-Muhaimin Iskandar\"\n", " if x == \"2\":\n", " return \"Prabowo Subianto-Gibran Rakabuming Raka\"\n", " if x == \"3\":\n", " return \"Ganjar Pranowo-Mahfud MD\"\n", " return x \n", " \n", " annotator_df = pd.read_json(\"hf://datasets/Exqrch/IndoToxic2024/indotoxic2024_annotator_demographic_data_v2_final.jsonl\", lines=True)\n", " annotator_df['gender'] = annotator_df['gender'].apply(lambda x: x.strip())\n", " annotator_df['age_group'] = annotator_df['age'].astype(int)\n", " annotator_df['age_group'] = annotator_df['age_group'].apply(lambda x: age_group_f(x))\n", " annotator_df['status pekerjaan'] = annotator_df['status pekerjaan'].apply(lambda x: 'Tidak Bekerja' if x == \"Ibu Rumah Tangga\" else x)\n", " annotator_df['president vote leaning'] = annotator_df['president vote leaning'].apply(lambda x: \"Tidak ada\" if x not in [\"1\", \"2\", \"3\"] else x)\n", " annotator_df['president vote leaning'] = annotator_df['president vote leaning'].apply(lambda x : president_vote_f(x)) \n", " annotator_df['annotator_id'] = annotator_df['annotator_id'].astype(str)\n", "\n", " columns = [\n", " 'is_noise_or_spam_text',\n", " 'related_to_election_2024',\n", " 'toxicity',\n", " 'polarized',\n", " 'profanity_obscenity',\n", " 'threat_incitement_to_violence',\n", " 'insults',\n", " 'identity_attack',\n", " 'sexually_explicit'\n", " ]\n", "\n", " df['annotators_id'] = df['annotators_id'].apply(lambda x: ast.literal_eval(x))\n", " df_exploded = df.explode('annotators_id')\n", " df_exploded.rename(columns={\n", " 'annotators_id': 'annotator_id'\n", " }, inplace=True)\n", " merged_df = df_exploded.merge(annotator_df, on=\"annotator_id\", how=\"inner\")\n", " merged_df['text_id_index'] = merged_df.groupby('text_id').cumcount()\n", " merged_df['text_id_index'] = merged_df['text_id_index'].astype(int)\n", " for col in columns:\n", " merged_df[col] = merged_df[col].apply(lambda x: ast.literal_eval(x))\n", " merged_df[col] = merged_df.apply(lambda row: row[col][row['text_id_index']], axis=1)\n", "\n", " return merged_df\n", " \n", "def exploded_df_train_baseline_bert_pipeline_with_demographic_feature(model_path: str, \n", " merged_df, \n", " output_dir: str,\n", " demographic: List[str] = [], \n", " language: str = \"id\",\n", " raw_test: bool = False):\n", " # Handle texts and labels\n", " original_df, texts, labels = demographic_text_handler(merged_df) # Just using this to ensure replicability with old baseline\n", "\n", " # Create output directory\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n", " metrics_list = []\n", "\n", " for fold, (train_index, test_index) in enumerate(skf.split(texts, labels)):\n", " train_df = original_df.iloc[train_index]\n", " test_df = original_df.iloc[test_index]\n", "\n", " train_df = process_and_explode(train_df)\n", " test_df = process_and_explode(test_df)\n", "\n", " train_texts, train_labels = single_level_demographic_text_handler(train_df, demographic, language)\n", " test_texts, test_labels = single_level_demographic_text_handler(test_df, demographic, language)\n", " \n", " if raw_test:\n", " test_texts = [text.split('[SEP]')[-1].strip() for text in test_texts]\n", "\n", " train_labels = [int(x) for x in train_labels]\n", " test_labels = [int(x) for x in test_labels]\n", "\n", " # Tokenize\n", " tokenizer = BertTokenizer.from_pretrained(model_path)\n", " train_encodings = tokenizer(list(train_texts), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", " test_encodings = tokenizer(list(test_texts), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", "\n", " train_dataset = Dataset(train_encodings, train_labels)\n", " test_dataset = Dataset(test_encodings, test_labels)\n", "\n", " model = BertForSequenceClassification.from_pretrained(model_path, num_labels=len(set(train_labels)))\n", "\n", " training_args = TrainingArguments(\n", " output_dir=os.path.join(output_dir, f\"temp_model_fold_{fold}\"), # Temporary directory for Trainer\n", " evaluation_strategy=\"epoch\",\n", " per_device_train_batch_size=16,\n", " per_device_eval_batch_size=64,\n", " num_train_epochs=3,\n", " logging_dir=os.path.join(output_dir, f\"logs_fold_{fold}\"),\n", " save_strategy=\"no\", # Prevent model saving during fold training\n", " )\n", "\n", " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", " train_dataset=train_dataset,\n", " eval_dataset=test_dataset,\n", " compute_metrics=compute_metrics,\n", " )\n", "\n", " # Train and evaluate\n", " trainer.train()\n", " metrics = trainer.evaluate()\n", " metrics_list.append(metrics)\n", "\n", " # Save performance report\n", " pd.DataFrame([metrics]).to_csv(os.path.join(output_dir, f\"performance_fold_{fold}.csv\"), index=False)\n", "\n", " # Calculate average performance metrics\n", " avg_metrics = {metric: np.mean([m[metric] for m in metrics_list]) for metric in metrics_list[0]}\n", " pd.DataFrame([avg_metrics]).to_csv(os.path.join(output_dir, \"average_performance.csv\"), index=False)\n", "\n", "# Dataset class to handle encoding\n", "class Dataset(torch.utils.data.Dataset):\n", " def __init__(self, encodings, labels):\n", " self.encodings = encodings\n", " self.labels = [int(label) for label in labels] \n", "\n", " def __getitem__(self, idx):\n", " item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n", " item['labels'] = torch.tensor(self.labels[idx])\n", " return item\n", "\n", " def __len__(self):\n", " return len(self.labels)\n" ] }, { "cell_type": "markdown", "id": "1c18e970-0af6-495a-afbc-c03a6122ab43", "metadata": {}, "source": [ "## 5. Combining Polarization and Demographic Information for Toxicity Detection\n", "Main Function: `exploded_df_train_baseline_bert_pipeline_with_polarization_and_demographic_feature`" ] }, { "cell_type": "code", "execution_count": null, "id": "65331385-1a47-42b6-b4d2-2e930e045178", "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "import ast\n", "import os\n", "import numpy as np\n", "from typing import List\n", "from sklearn.model_selection import StratifiedKFold\n", "from transformers import Trainer, TrainingArguments, BertForSequenceClassification, BertTokenizer\n", "import torch\n", "\n", "from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, average_precision_score\n", "\n", "def compute_metrics(pred):\n", " labels = pred.label_ids\n", " preds = pred.predictions.argmax(-1)\n", "\n", " # Accuracy\n", " accuracy = accuracy_score(labels, preds)\n", "\n", " # Macro F1, Precision, and Recall\n", " macro_f1 = f1_score(labels, preds, average='macro')\n", " precision = precision_score(labels, preds, average='macro')\n", " recall = recall_score(labels, preds, average='macro')\n", "\n", " # Class-1 only metrics (positive class)\n", " precision_class_1 = precision_score(labels, preds, pos_label=1)\n", " recall_class_1 = recall_score(labels, preds, pos_label=1)\n", " f1_class_1 = f1_score(labels, preds, pos_label=1)\n", "\n", " # Class-0 only metrics (negative class)\n", " precision_class_0 = precision_score(labels, preds, pos_label=0)\n", " recall_class_0 = recall_score(labels, preds, pos_label=0)\n", " f1_class_0 = f1_score(labels, preds, pos_label=0)\n", "\n", " # ROC-AUC score for binary classification\n", " try:\n", " # Compute the ROC AUC score for binary classification directly\n", " roc_auc = roc_auc_score(labels, preds)\n", " except ValueError:\n", " # In case there's an issue with the labels or predictions (e.g., all labels are the same)\n", " roc_auc = 0.5 # This would represent random classification if AUC can't be computed\n", "\n", " # Precision-Recall AUC\n", " precision_recall_auc = average_precision_score(labels, preds)\n", "\n", " return {\n", " 'accuracy': accuracy,\n", " 'macro_f1': macro_f1,\n", " 'precision': precision,\n", " 'recall': recall,\n", " 'precision_class_1': precision_class_1,\n", " 'recall_class_1': recall_class_1,\n", " 'f1_class_1': f1_class_1,\n", " 'precision_class_0': precision_class_0,\n", " 'recall_class_0': recall_class_0,\n", " 'f1_class_0': f1_class_0,\n", " 'roc_auc': roc_auc,\n", " 'precision_recall_auc': precision_recall_auc,\n", " }\n", "\n", "def single_level_toxicity_and_demographic_text_handler(df, demographic: List[str] = [], language: str = \"id\"):\n", " \"\"\"\n", " Handles polar and non-polar text datasets for polarity classification, \n", " applying demographic information\n", "\n", " Language options:\n", " 1. id = Indonesian.\n", " 2. en = English.\n", "\n", " Demographic options, may be a list of strings:\n", " 1. ethnicity\n", " 2. religion\n", " 3. disability\n", " 4. lgbt\n", " 5. gender\n", " 6. age_group\n", " 7. domisili\n", " 8. pendidikan terakhir\n", " 9. status pekerjaan\n", " 10. president vote leaning\n", " \"\"\"\n", "\n", " id_demographic_names = {\n", " 'ethnicity': 'etnisitas',\n", " 'religion' : 'agama',\n", " 'disability': 'disabilitas',\n", " 'lgbt' : 'lgbt',\n", " 'gender': 'gender',\n", " 'age_group': 'generasi',\n", " 'domisili': 'domisili',\n", " 'pendidikan terakhir': 'pendidikan terakhir',\n", " 'status pekerjaan': 'status pekerjaan',\n", " 'president vote leaning': 'pilihan presiden'\n", " }\n", "\n", " en_demographic_names = {\n", " 'ethnicity': 'ethnicity',\n", " 'religion': 'religion',\n", " 'disability': 'disability',\n", " 'lgbt': 'lgbt',\n", " 'gender': 'gender',\n", " 'age_group': 'generation',\n", " 'domisili': 'domicile',\n", " 'pendidikan terakhir': 'last education level',\n", " 'status pekerjaan': 'job status',\n", " 'president vote leaning': 'president vote leaning'\n", " \n", " }\n", "\n", " # preprocess toxic dataset\n", " df['label'] = df['toxicity']\n", " \n", " # Format text based on language and add polarization\n", " def format_text(row, language, demographic):\n", " if language == \"id\":\n", " if len(demographic) == 0:\n", " return \"Informasi Demografis dan Toksisitas: Tidak tersedia\"\n", " \n", " input_string = f\"{row['combined_text']}\\nInformasi Demografis:\\n\"\n", " for demo in demographic:\n", " input_string += f\"{id_demographic_names[demo]}: {row[demo]}\\n\"\n", " input_string = input_string.strip(\"\\n\")\n", " input_string = f\"{input_string} [SEP] {row['text']}\"\n", " return input_string\n", " elif language == \"en\":\n", " if len(demographic) == 0:\n", " return \"Demographic Information and Toxicity: Not available\"\n", "\n", " input_string = f\"{row['combined_text']}\\nDemographic Information:\\n\"\n", " for demo in demographic:\n", " input_string += f\"{en_demographic_names[demo]}: {row[demo]}\\n\"\n", " input_string = input_string.strip(\"\\n\")\n", " input_string = f\"{input_string} [SEP] {row['text']}\"\n", " return input_string\n", " else:\n", " raise ValueError(f\"Unsupported language: {language}\")\n", "\n", " df['combined_text'] = df.apply(lambda row: format_text(row, language, demographic), axis=1)\n", " print(df['label'].value_counts())\n", "\n", " # Prepare outputs\n", " texts = df['combined_text'].tolist()\n", " labels = df['label'].tolist()\n", "\n", " return texts, labels\n", "\n", "import pandas as pd\n", "import ast\n", "def process_and_explode(df):\n", " def age_group_f(x):\n", " if 12 <= x <= 29:\n", " return \"Gen Z\"\n", " if 30 <= x <= 44:\n", " return \"Millenials\"\n", " if 45 <= x <= 59:\n", " return \"Gen X\"\n", " \n", " def president_vote_f(x):\n", " if x == \"1\":\n", " return \"Anies Rasyid Baswedan-Muhaimin Iskandar\"\n", " if x == \"2\":\n", " return \"Prabowo Subianto-Gibran Rakabuming Raka\"\n", " if x == \"3\":\n", " return \"Ganjar Pranowo-Mahfud MD\"\n", " return x \n", " \n", " annotator_df = pd.read_json(\"hf://datasets/Exqrch/IndoToxic2024/indotoxic2024_annotator_demographic_data_v2_final.jsonl\", lines=True)\n", " annotator_df['gender'] = annotator_df['gender'].apply(lambda x: x.strip())\n", " annotator_df['age_group'] = annotator_df['age'].astype(int)\n", " annotator_df['age_group'] = annotator_df['age_group'].apply(lambda x: age_group_f(x))\n", " annotator_df['status pekerjaan'] = annotator_df['status pekerjaan'].apply(lambda x: 'Tidak Bekerja' if x == \"Ibu Rumah Tangga\" else x)\n", " annotator_df['president vote leaning'] = annotator_df['president vote leaning'].apply(lambda x: \"Tidak ada\" if x not in [\"1\", \"2\", \"3\"] else x)\n", " annotator_df['president vote leaning'] = annotator_df['president vote leaning'].apply(lambda x : president_vote_f(x)) \n", " annotator_df['annotator_id'] = annotator_df['annotator_id'].astype(str)\n", "\n", " columns = [\n", " 'is_noise_or_spam_text',\n", " 'related_to_election_2024',\n", " 'toxicity',\n", " 'polarized',\n", " 'profanity_obscenity',\n", " 'threat_incitement_to_violence',\n", " 'insults',\n", " 'identity_attack',\n", " 'sexually_explicit'\n", " ]\n", "\n", " df['annotators_id'] = df['annotators_id'].apply(lambda x: ast.literal_eval(x))\n", " df_exploded = df.explode('annotators_id')\n", " df_exploded.rename(columns={\n", " 'annotators_id': 'annotator_id'\n", " }, inplace=True)\n", " merged_df = df_exploded.merge(annotator_df, on=\"annotator_id\", how=\"inner\")\n", " merged_df['text_id_index'] = merged_df.groupby('text_id').cumcount()\n", " merged_df['text_id_index'] = merged_df['text_id_index'].astype(int)\n", " for col in columns:\n", " merged_df[col] = merged_df[col].apply(lambda x: ast.literal_eval(x))\n", " merged_df[col] = merged_df.apply(lambda row: row[col][row['text_id_index']], axis=1)\n", "\n", " return merged_df\n", "\n", "def polarity_text_handler(merged_df, method: str = \"agg\", language: str = \"id\"):\n", " \"\"\"\n", " Handles toxic and non-toxic text datasets for toxicity classification, \n", " applying polarization processing and text formatting.\n", "\n", " Method options:\n", " 1. agg = Aggregate value with a range of [0, 1].\n", " 2. bin = Binarized, values of either 0 or 1 (values of 0.5 converted to 0).\n", " 3. bin-ceil = Binarized, but values of 0.5 converted to 1.\n", " 4. any = Binarized, any value above 0 is converted to 1.\n", "\n", " Language options:\n", " 1. id = Indonesian.\n", " 2. en = English.\n", " \"\"\"\n", " def process_polarized_values(row, method):\n", " \"\"\"Processes the polarization values according to the selected method.\"\"\"\n", " values = ast.literal_eval(row['polarized']) if isinstance(row['polarized'], str) else row['polarized']\n", " values = [int(x) for x in values]\n", " if not values:\n", " return 0 # Default for missing or empty polarization\n", " \n", " agg_value = sum(values) / len(values)\n", " if method == \"agg\":\n", " return agg_value\n", " elif method == \"bin\":\n", " return 1 if agg_value > 0.5 else 0\n", " elif method == \"bin-ceil\":\n", " return 1 if agg_value >= 0.5 else 0\n", " elif method == \"any\":\n", " return 1 if agg_value > 0 else 0\n", " else:\n", " raise ValueError(f\"Unsupported method: {method}\")\n", "\n", " merged_df['polarized'] = merged_df['polarized'].fillna(0)\n", " merged_df['polarized_value'] = merged_df.apply(lambda row: process_polarized_values(row, method), axis=1)\n", "\n", " # Format text based on language and add polarization\n", " def format_text(row, language):\n", " if language == \"id\":\n", " return f\"Nilai polarisasi rata-rata (rentang 0 hingga 1): {row['polarized_value']}\"\n", " elif language == \"en\":\n", " return f\"Average polarization value (range of 0 to 1): {row['polarized_value']}\"\n", " else:\n", " raise ValueError(f\"Unsupported language: {language}\")\n", "\n", " merged_df['combined_text'] = merged_df.apply(lambda row: format_text(row, language), axis=1)\n", "\n", " # Prepare outputs\n", " texts = merged_df['combined_text'].tolist()\n", " labels = merged_df['label'].tolist()\n", "\n", " return merged_df, texts, labels\n", "\n", "\n", "def exploded_df_train_baseline_bert_pipeline_with_polarization_and_demographic_feature(model_path: str, \n", " merged_df, \n", " output_dir: str,\n", " demographic: List[str] = [], \n", " method: str = \"agg\",\n", " language: str = \"id\",\n", " raw_test: bool = False):\n", " # Handle texts and labels\n", " original_df, texts, labels = polarity_text_handler(merged_df, method, language) # Just using this to ensure replicability with old baseline\n", "\n", " # Create output directory\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\n", " metrics_list = []\n", "\n", " for fold, (train_index, test_index) in enumerate(skf.split(texts, labels)):\n", " train_df = original_df.iloc[train_index]\n", " test_df = original_df.iloc[test_index]\n", "\n", " train_df = process_and_explode(train_df)\n", " test_df = process_and_explode(test_df)\n", "\n", " train_texts, train_labels = single_level_toxicity_and_demographic_text_handler(train_df, demographic, language)\n", " test_texts, test_labels = single_level_toxicity_and_demographic_text_handler(test_df, demographic, language)\n", " \n", " if raw_test:\n", " test_texts = [text.split('[SEP]')[-1].strip() for text in test_texts]\n", "\n", " train_labels = [int(x) for x in train_labels]\n", " test_labels = [int(x) for x in test_labels]\n", "\n", " # Tokenize\n", " tokenizer = BertTokenizer.from_pretrained(model_path)\n", " train_encodings = tokenizer(list(train_texts), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", " test_encodings = tokenizer(list(test_texts), truncation=True, padding=True, max_length=512, return_tensors='pt')\n", "\n", " train_dataset = Dataset(train_encodings, train_labels)\n", " test_dataset = Dataset(test_encodings, test_labels)\n", "\n", " model = BertForSequenceClassification.from_pretrained(model_path, num_labels=len(set(train_labels)))\n", "\n", " training_args = TrainingArguments(\n", " output_dir=os.path.join(output_dir, f\"temp_model_fold_{fold}\"), # Temporary directory for Trainer\n", " evaluation_strategy=\"epoch\",\n", " per_device_train_batch_size=16,\n", " per_device_eval_batch_size=64,\n", " num_train_epochs=3,\n", " logging_dir=os.path.join(output_dir, f\"logs_fold_{fold}\"),\n", " save_strategy=\"no\", # Prevent model saving during fold training\n", " )\n", "\n", " trainer = Trainer(\n", " model=model,\n", " args=training_args,\n", " train_dataset=train_dataset,\n", " eval_dataset=test_dataset,\n", " compute_metrics=compute_metrics,\n", " )\n", "\n", " # Train and evaluate\n", " trainer.train()\n", " metrics = trainer.evaluate()\n", " metrics_list.append(metrics)\n", "\n", " # Save performance report\n", " pd.DataFrame([metrics]).to_csv(os.path.join(output_dir, f\"performance_fold_{fold}.csv\"), index=False)\n", "\n", " # Calculate average performance metrics\n", " avg_metrics = {metric: np.mean([m[metric] for m in metrics_list]) for metric in metrics_list[0]}\n", " pd.DataFrame([avg_metrics]).to_csv(os.path.join(output_dir, \"average_performance.csv\"), index=False)\n", "\n", "# Dataset class to handle encoding\n", "class Dataset(torch.utils.data.Dataset):\n", " def __init__(self, encodings, labels):\n", " self.encodings = encodings\n", " self.labels = [int(label) for label in labels] \n", "\n", " def __getitem__(self, idx):\n", " item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n", " item['labels'] = torch.tensor(self.labels[idx])\n", " return item\n", "\n", " def __len__(self):\n", " return len(self.labels)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.4" } }, "nbformat": 4, "nbformat_minor": 5 }