hexabot/nlu/models/slot_filler.py

import os
import functools
import json
from transformers import TFBertModel, AutoTokenizer
from keras.layers import Dropout, Dense
from sys import platform

if platform == "darwin":
    from keras.optimizers.legacy import Adam
else:
    from keras.optimizers import Adam

from keras.losses import SparseCategoricalCrossentropy
from keras.metrics import SparseCategoricalAccuracy
import numpy as np
from data_loaders.jisfdl import JISFDL
from sklearn.metrics import classification_report
import boilerplate as tfbp

##
# Slot filling with BERT
#
# This model is based on the paper "BERT for Joint Intent Classification and Slot Filling"
# by Chen et al. (2019), https://arxiv.org/abs/1902.10909, but trained on a different
# dataset made for a class project.
#
# Ideas were also taken from https://github.com/monologg/JointBERT, which is a PyTorch
# implementation of the paper with the original dataset.
##


@tfbp.default_export
class SlotFiller(tfbp.Model):
    default_hparams = {
        "language": "",
        "num_epochs": 2,
        "dropout_prob": 0.1,
        "slot_num_labels": 40
    }

    data_loader: JISFDL

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Init data loader
        self.data_loader = JISFDL(**kwargs)

        # Load the tokenizer from transformers.
        # The same pretrained BERT checkpoint (e.g. bert-base-cased for English) is used
        # for both the tokenizer and the classifier; it is selected per language through
        # the BERT_MODEL_BY_LANGUAGE_JSON environment variable.
        bert_model_by_language_json = os.getenv('BERT_MODEL_BY_LANGUAGE_JSON')

        # Check if the environment variable is set
        if not bert_model_by_language_json:
            raise ValueError("The BERT_MODEL_BY_LANGUAGE_JSON environment variable is not set.")

        # Parse the JSON string into a Python dictionary
        try:
            bert_models = json.loads(bert_model_by_language_json)
        except json.JSONDecodeError as e:
            raise ValueError(f"Failed to parse BERT_MODEL_BY_LANGUAGE_JSON: {e}")

        # Look up the checkpoint for the configured language (default: "en")
        try:
            bert_model_name = bert_models[self.hparams.language or "en"]
        except KeyError as e:
            raise ValueError(f"No BERT model is available for the provided language: {e}")

        self.tokenizer = AutoTokenizer.from_pretrained(
            bert_model_name, use_fast=False)
        self.bert = TFBertModel.from_pretrained(bert_model_name)

        self.dropout = Dropout(self.hparams.dropout_prob)
        self.slot_classifier = Dense(self.hparams.slot_num_labels,
                                     name="slot_classifier", activation="softmax")

    def call(self, inputs, **kwargs):
        trained_bert = self.bert(inputs, **kwargs)
        sequence_output = trained_bert.last_hidden_state

        # sequence_output will be used for slot filling
        sequence_output = self.dropout(sequence_output,
                                       training=kwargs.get("training", False))
        slot_probas = self.slot_classifier(sequence_output)
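        # slot_probas has shape (batch_size, sequence_length, slot_num_labels):
        # a per-token softmax distribution over the slot label set.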
        return slot_probas

    @tfbp.runnable
    def fit(self):
        """Training"""
        encoded_texts, encoded_intents, encoded_slots, intent_names, slot_names = self.data_loader(
            self.tokenizer)

        if self.hparams.slot_num_labels != len(slot_names):
            raise ValueError(
                f"Hyperparam slot_num_labels mismatch, should be: {len(slot_names)}"
            )

        # Optimizer and loss function
        opt = Adam(learning_rate=3e-5, epsilon=1e-08)

        # This model is fine-tuned for slot filling only, so a single sparse
        # categorical loss over the per-token slot labels is enough.
        losses = SparseCategoricalCrossentropy()
        metrics = [SparseCategoricalAccuracy("accuracy")]

        # Compile model
        self.compile(optimizer=opt, loss=losses, metrics=metrics)

        x = {"input_ids": encoded_texts["input_ids"],
             "token_type_ids": encoded_texts["token_type_ids"],
             "attention_mask": encoded_texts["attention_mask"]}

        super().fit(
            x, encoded_slots, epochs=self.hparams.num_epochs, batch_size=32, shuffle=True)

        # Persist the model
        self.extra_params["slot_names"] = slot_names
        self.save()

    @tfbp.runnable
    def evaluate(self):
        """Evaluation"""
        # Load test data
        # Assuming the data loader can return test data when mode='test' is specified
        encoded_texts, _, encoded_slots, _, slot_names = self.data_loader(
            self.tokenizer, self.extra_params)

        # Get predictions
        predictions = self(encoded_texts)
        predicted_slot_ids = np.argmax(predictions, axis=-1)  # Shape: (batch_size, sequence_length)

        true_labels = encoded_slots.flatten()
        pred_labels = predicted_slot_ids.flatten()

        # Filter out padding tokens (assuming the padding label id is 0)
        mask = true_labels != 0
        filtered_true_labels = true_labels[mask]
        filtered_pred_labels = pred_labels[mask]

        # Adjust labels to start from 0 (since the padding label 0 is removed)
        filtered_true_labels -= 1
        filtered_pred_labels -= 1
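
        # Illustrative example (hypothetical label set): with
        # slot_names = ["<PAD>", "time", "city"], a true sequence [0, 1, 2, 0]
        # is masked down to [1, 2] and shifted to [0, 1], which lines up with
        # slot_names_no_pad = ["time", "city"] below.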

        # Get slot names excluding the padding label
        slot_names_no_pad = self.extra_params["slot_names"][1:]

        report = classification_report(
            filtered_true_labels,
            filtered_pred_labels,
            target_names=slot_names_no_pad,
            zero_division=0
        )

        print(report)

        # Optionally, the report could be returned as a dictionary instead
        # (classification_report(..., output_dict=True)).
        return report

    @tfbp.runnable
    def predict(self):
        while True:
            text = input("Provide text: ")
            info = self.get_prediction(text)

            print(self.summary())
            print("Text : " + text)
            print(info)

            # Provide a way to exit the loop
            if input("Try again? (y/n): ").lower() != 'y':
                break

    def get_slots_prediction(self, text: str, inputs, slot_probas):
        slot_probas_np = slot_probas.numpy()
        # Get the indices of the maximum values
        slot_ids = slot_probas_np.argmax(axis=-1)[0, :]

        # Get all predicted slot names and add them to out_dict as keys
        out_dict = {}
        predicted_slots = set([self.extra_params["slot_names"][s]
                               for s in slot_ids if s != 0])
        for ps in predicted_slots:
            out_dict[ps] = []

        # Retrieve the tokenization that was used for the predictions
        tokens = self.tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])

        # We'd like to eliminate all special tokens from our output
        special_tokens = self.tokenizer.special_tokens_map.values()

        for token, slot_id in zip(tokens, slot_ids):
            if token in special_tokens:
                continue

            # Add every labelled token to out_dict
            slot_name = self.extra_params["slot_names"][slot_id]
            if slot_name == "<PAD>":
                continue

            # Collect tokens
            collected_tokens = [token]
            idx = tokens.index(token)

            # A token starting with "##" is a WordPiece continuation and
            # belongs to the previous token
            if token.startswith("##"):
                # Check if the previous token has already been collected
                if tokens[idx - 1] not in out_dict[slot_name]:
                    collected_tokens.insert(0, tokens[idx - 1])

            # Add collected tokens to slots
            out_dict[slot_name].extend(collected_tokens)
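
        # Illustrative example (hypothetical tokens and slot names): for the text
        # "book a flight to vancouver", the tokenizer may split the city into
        # ["van", "##cou", "##ver"]; if those positions are all labelled "city",
        # out_dict becomes {"city": ["van", "##cou", "##ver"]} and is detokenized
        # back to "vancouver" below.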

        slot_names_to_ids = {value: key for key, value in enumerate(
            self.extra_params["slot_names"])}

        entities = []
        # Process out_dict
        for slot_name in out_dict:
            slot_id = slot_names_to_ids[slot_name]
            slot_tokens = out_dict[slot_name]

            slot_value = self.tokenizer.convert_tokens_to_string(
                slot_tokens).strip()

            entity = {
                "entity": slot_name,
                "value": slot_value,
                "start": text.find(slot_value),
                "end": text.find(slot_value) + len(slot_value),
                "confidence": 0,
            }

            # The confidence of a slot is the average confidence of the tokens in that slot.
            indices = [tokens.index(token) for token in slot_tokens]
            if len(slot_tokens) > 0:
                total = functools.reduce(
                    lambda proba1, proba2: proba1 + proba2, slot_probas_np[0, indices, slot_id], 0)
                entity["confidence"] = total / len(slot_tokens)
            else:
                entity["confidence"] = 0
            entities.append(entity)

        return entities

    def get_prediction(self, text: str):
        inputs = self.data_loader.encode_text(text, self.tokenizer)
        slot_probas = self(inputs)  # type: ignore

        entities = []
        if slot_probas is not None:
            entities = self.get_slots_prediction(text, inputs, slot_probas)

        return {
            "text": text,
            "entities": entities,
        }
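
    # Illustrative return value (hypothetical entity and confidence): calling
    # get_prediction("book a flight to vancouver") might yield
    # {"text": "book a flight to vancouver",
    #  "entities": [{"entity": "city", "value": "vancouver",
    #                "start": 17, "end": 26, "confidence": 0.97}]}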