---
language:
- en
- fr
---

Title-Paragraph Segmentation Model (v1.0)

A formal content segmentation model belonging to the first-order segmentation (FOS) model family. It performs the title-paragraph separation task: given two consecutive lines of text, it decides whether a segment boundary falls between them.

Architecture:

  • E5-base cross-encoder (intfloat/multilingual-e5-base backbone with a small classification head)

Dataset:

  • Custom contrastive title-paragraph dataset built from WikiText

Performance:

  • 89% accuracy on the test set

Broader context:

  1. The aim of FOS is to separate the content types featured in raw unstructured strings (a toy sketch follows these lists), such as:
  • text
  • code
  • tables
  • lists
  • math formulas
  • images
  2. This will enable further processing such as second-order segmentation (SOS), which aims at generating semantic boundaries, i.e. segmenting:
  • plain text into knowledge units
  • code into functional blocks
  • math formulas blocks into equations
  • objects/concepts within an image
  • videos into timestamped chapters
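
For illustration only, a full FOS pass over a mixed document could emit typed blocks like the hypothetical sketch below (this particular model covers just the title/paragraph case; all names are made up):

# Hypothetical FOS output: (content_type, text_span) pairs over the raw string.
fos_blocks = [
    ('title', 'Ancient Foundations (3000 BCE - 600 CE)'),
    ('text',  'In the dawn of human civilization, mathematics emerged ...'),
    ('math',  'a^2 + b^2 = c^2'),
]
# A SOS stage would then split each block further,
# e.g. plain text into knowledge units.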

Model Details

Direct Use

Setup and Utilities
from transformers import XLMRobertaPreTrainedModel, XLMRobertaModel, AutoTokenizer
from nltk.tokenize import line_tokenize
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from datasets import Dataset
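
# Dependencies: transformers, torch, datasets, nltk
# (line_tokenize is regex-based and needs no extra NLTK data download).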


# Utility functions


def get_default_device():
    """Pick the best available device: CUDA, then Apple MPS, then CPU."""
    if torch.cuda.is_available():
        return torch.device('cuda')
    elif torch.backends.mps.is_available():
        return torch.device('mps')
    else:
        return torch.device('cpu')
    
def to_device(data, device):
    """Recursively move tensors (nested in lists, tuples or dicts) to `device`."""
    if isinstance(data, (list, tuple)):
        return [to_device(x, device) for x in data]
    elif isinstance(data, dict):
        return {k: to_device(v, device) for k, v in data.items()}
    return data.to(device)

class DeviceDataLoader():
    """Wrap a DataLoader so each batch is moved to `device` as it is yielded."""

    def __init__(self, dl, device):
        self.dl = dl
        self.device = device
        
    def __iter__(self):
        for b in self.dl: 
            yield to_device(b, self.device)

    def __len__(self):
        return len(self.dl)
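
# Illustrative usage (any map-style dataset works here):
#   loader = DeviceDataLoader(DataLoader(dataset, batch_size=8), device)
#   for batch in loader:   # batches arrive already on `device`
#       ...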

class IsoBN(nn.Module):
    def __init__(self, hidden_size):
        """Init method"""
        super().__init__()
        self.register_parameter(name='cov', param=torch.nn.Parameter(torch.zeros(hidden_size, hidden_size)))
        self.register_parameter(name='std', param=torch.nn.Parameter(torch.zeros(hidden_size)))

        self.cov.requires_grad = False
        self.std.requires_grad = False

    def forward(self, input, momentum: float = 0.05, eps: float = 1e-3, beta: float = 0.5):
        """Forward method"""
        if self.training:
            x = input.detach()
            n = x.size(0)
            mean = x.mean(dim=0)
            y = x - mean.unsqueeze(0)
            std = (y ** 2).mean(0) ** 0.5
            cov = (y.t() @ y) / n
            self.cov.data += momentum * (cov.data - self.cov.data)
            self.std.data += momentum * (std.data - self.std.data)
        corr = torch.clamp(self.cov / torch.ger(self.std, self.std), -1, 1)
        gamma = (corr ** 2).mean(1)
        denorm = (gamma * self.std)
        scale = 1 / (denorm + eps) ** beta
        E = torch.diag(self.cov).sum()
        new_E = (torch.diag(self.cov) * (scale ** 2)).sum()
        m = (E / (new_E + eps)) ** 0.5
        scale *= m
        return input * scale.unsqueeze(0).detach()
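
# IsoBN tracks running covariance/std statistics of the CLS embedding and
# rescales each dimension to reduce anisotropy before the classifier head.
# Minimal sanity check (illustrative only):
#   isobn = IsoBN(768); isobn.train()
#   out = isobn(torch.randn(16, 768))  # updates running stats; output shape (16, 768)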

class e5_base_CTSEG(XLMRobertaPreTrainedModel):

    def __init__(self, config):

        super().__init__(config)

        # multilingual-e5-base backbone (weights are overridden when the
        # checkpoint is loaded via from_pretrained below).
        self.e5 = XLMRobertaModel.from_pretrained('intfloat/multilingual-e5-base')
        self.dropout = nn.Dropout(0.5)
        # Classification head: 768 -> 256 -> 128 -> 2 (index 1 = boundary).
        self.linear_1 = nn.Linear(768, 256)
        self.linear_2 = nn.Linear(256, 128)
        self.linear_3 = nn.Linear(128, 2)
        self.relu = nn.ReLU()
        self.isobn = IsoBN(768)
        
    def forward(self, sent):

        # Collapse any extra batch dimensions introduced by the DataLoader.
        sent['input_ids'] = sent['input_ids'].reshape(sent['input_ids'].shape[0], -1)
        sent['attention_mask'] = sent['attention_mask'].reshape(sent['attention_mask'].shape[0], -1)

        hs = self.e5(input_ids=sent['input_ids'], attention_mask=sent['attention_mask'])
        # Take the [CLS] (first-token) embedding and whiten it with IsoBN.
        cls_hs = hs.last_hidden_state[:, 0]
        cls_hs = self.isobn(cls_hs)

        out = self.linear_1(cls_hs)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.linear_2(out)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.linear_3(out)

        return out
    
    
    def training_step(self, sent, labels):
        out = self.forward(sent)
        loss = F.cross_entropy(out, labels)
        return loss
    
    def validation_step(self, sent, labels):
        out = self.forward(sent)
        loss = F.cross_entropy(out, labels)
        acc = accuracy(out, labels)
        return {'val_acc':acc,'val_loss':loss.detach()}
    
    def validation_epoch_end(self, metrics):
        batch_losses = [x['val_loss'] for x in metrics]
        batch_accs =  [x['val_acc'] for x in metrics]
        
        epoch_loss = torch.stack(batch_losses).mean().item()
        epoch_acc = torch.stack(batch_accs).mean().item()
        
        return {'val_loss':epoch_loss, 'val_acc':epoch_acc}
    
    def epoch_end(self, epoch, result):
        
        print("Epoch [{}], train_loss: {:.4f}, val_loss: {:.4f}, val_acc: {:.4f}".format(
            epoch, result['train_loss'], result['val_loss'], result['val_acc']))
        
    def evaluate(self, val_loader):
        self.eval()
        metrics = [
            self.validation_step(sent, labels.long().to(device, non_blocking=True))
            for sent, labels in val_loader
        ]
        return self.validation_epoch_end(metrics)
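
# Sketch of a fine-tuning loop built on the hooks above (illustrative; the
# released checkpoint was trained separately):
#   optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
#   for epoch in range(num_epochs):
#       model.train()
#       for sent, labels in train_loader:
#           loss = model.training_step(sent, labels)
#           loss.backward()
#           optimizer.step()
#           optimizer.zero_grad()
#       print(model.evaluate(val_loader))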

def accuracy(out, labels):
    return (out.argmax(dim=1) == labels).sum() / labels.numel()


tokenizer = AutoTokenizer.from_pretrained('intfloat/multilingual-e5-base')
model = e5_base_CTSEG.from_pretrained('ProfessorBob/title-par-segmentation')
device = get_default_device()
to_device(model, device)

def infer_block(
    chunks,
    batch_size: int = 8,
    return_probability: bool = False,
    tokenizer=tokenizer
):
    """Batch inference over (line, next_line) pairs.

    Returns (preds, out): one prediction per pair (1 = segment boundary between
    the two lines, or the boundary probability when `return_probability` is
    True) plus the raw logits of the last batch.
    """
    # E5 models expect a 'query: ' prefix; the pair is joined with [SEP].
    tok_text_bulk = tokenizer(
        ['query: ' + sent[0] + '[SEP]' + sent[1] for sent in chunks],
        padding='max_length',
        truncation=True,
        return_tensors='pt'
    )
    sentences = Dataset.from_dict({
        'input_ids': tok_text_bulk['input_ids'],
        'attention_mask':  tok_text_bulk['attention_mask']
    })
    sentences.set_format(
        'torch', 
        columns=['input_ids','attention_mask']
    )
    sentences = DataLoader(
        sentences, 
        batch_size=batch_size, 
        pin_memory=True
    )
    sentences = DeviceDataLoader(sentences, device)
    preds = list()
    model.eval()
    with torch.no_grad():
        for i, batch in enumerate(sentences):
            out = model(batch)
            if return_probability:
                preds.extend((out.softmax(dim=1).cpu()[:, 1]).tolist())
            else:
                preds.extend(out.argmax(dim=1).cpu().tolist())
    
    if device == torch.device('cuda'):
        torch.cuda.empty_cache()
    assert len(preds) == len(chunks)
    
    return preds, out
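
# Illustrative direct call: score a single (line, next_line) pair.
#   preds, logits = infer_block([('Ancient Foundations', 'In the dawn ...')])
#   preds[0] == 1 indicates a segment boundary between the two lines.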

def segmentation_pipeline(text):
    """Split raw text into lines, classify consecutive pairs, and regroup."""
    block = line_tokenize(text)
    # Build all (line_i, line_{i+1}) pairs.
    chunks = [(u, v) for u, v in zip(block[:-1], block[1:])]
    preds, out = infer_block(chunks, return_probability=False)
    # A prediction of 1 for pair i places a segment boundary before line i+1.
    cut_idx = [i + 1 for i, value in enumerate(preds) if value == 1]
    cut_idx = [0] + cut_idx + [len(block)]
    seg = [block[cut_idx[i]:cut_idx[i + 1]] for i in range(len(cut_idx) - 1)]

    return seg
Usage example

mixed_string = """

Ancient Foundations (3000 BCE - 600 CE)

In the dawn of human civilization, mathematics emerged as an essential tool for commerce, construction, and astronomy. Explore the mathematical innovations of ancient cultures such as the Babylonians, Egyptians, and Greeks, laying the groundwork for numerical systems, geometry, and the Pythagorean theorem.

The Golden Age of Islamic Mathematics (700 CE - 1300 CE)

Delve into the intellectual flourishing during the Islamic Golden Age, where scholars like Al-Khwarizmi and Omar Khayyam made groundbreaking contributions to algebra, trigonometry, and the development of algorithms. Discover how these advancements paved the way for the Renaissance in Europe.

"""

Generated Title-Paragraph Segmentation

Block 1
-----
Ancient Foundations (3000 BCE - 600 CE)

Block 2
-----
In the dawn of human civilization, mathematics emerged as an essential tool for commerce, construction, and astronomy. Explore the mathematical innovations of ancient cultures such as the Babylonians, Egyptians, and Greeks, laying the groundwork for numerical systems, geometry, and the Pythagorean theorem.

Block 3
-----
The Golden Age of Islamic Mathematics (700 CE - 1300 CE)

Block 4
-----
Delve into the intellectual flourishing during the Islamic Golden Age, where scholars like Al-Khwarizmi and Omar Khayyam made groundbreaking contributions to algebra, trigonometry, and the development of algorithms. Discover how these advancements paved the way for the Renaissance in Europe.