"""Inference endpoint handlers for the Precious3-GPT (P3GPT) models."""

import time
from typing import Any, Dict

import numpy as np
import pandas as pd
import torch
from transformers import AutoModel, AutoTokenizer, GenerationConfig

from P3LIB.precious3_gpt_multi_modal import Custom_MPTForCausalLM


class EndpointHandler:
    def __init__(self, path="insilicomedicine/precious3-gpt", device='cuda:1'):

        self.device = device
        self.model = AutoModel.from_pretrained(path, trust_remote_code=True).to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
        self.model.config.pad_token_id = self.tokenizer.pad_token_id
        self.model.config.bos_token_id = self.tokenizer.bos_token_id
        self.model.config.eos_token_id = self.tokenizer.eos_token_id

        unique_entities_p3 = pd.read_csv(
            'https://huggingface.co/insilicomedicine/precious3-gpt/raw/main/all_entities_with_type.csv')
        self.unique_compounds_p3 = [i.strip() for i in
                                    unique_entities_p3[unique_entities_p3.type == 'compound'].entity.to_list()]
        self.unique_genes_p3 = [i.strip() for i in
                                unique_entities_p3[unique_entities_p3.type == 'gene'].entity.to_list()]

    def create_prompt(self, prompt_config):
        """Assemble a P3GPT prompt string from a prompt-config dictionary."""
        prompt = "[BOS]"
        multi_modal_prefix = ''

        for k, v in prompt_config.items():
            if k == 'instruction':
                prompt += f'<{v}>' if isinstance(v, str) else "".join([f'<{v_i}>' for v_i in v])
            elif k in ('up', 'down'):
                if v:
                    v_str = v if isinstance(v, str) else " ".join(v)
                    prompt += f'{multi_modal_prefix}<{k}>{v_str} </{k}>'
            elif k == 'age':
                if isinstance(v, int):
                    if prompt_config['species'].strip() == 'human':
                        prompt += f'<{k}_individ>{v} </{k}_individ>'
                    elif prompt_config['species'].strip() == 'macaque':
                        # macaque age is bucketed into 20-unit bins
                        prompt += f'<{k}_individ>Macaca-{int(v / 20)} </{k}_individ>'
            else:
                if v:
                    prompt += f'<{k}>{v.strip()} </{k}>' if isinstance(v, str) else f'<{k}>{" ".join(v)} </{k}>'
                else:
                    prompt += f'<{k}></{k}>'
        return prompt
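
    # Example (hypothetical values; the tag vocabulary comes from the model's
    # tokenizer): a config such as
    #   {'instruction': 'disease2diff2disease', 'tissue': 'whole blood',
    #    'species': 'human', 'age': 70}
    # serializes to
    #   "[BOS]<disease2diff2disease><tissue>whole blood </tissue><species>human </species><age_individ>70 </age_individ>"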

    def generate_with_generation_config(self, input_ids, generation_config, max_new_tokens, random_seed=138):
        """Run `model.generate` with a fixed random seed; returns scores as well."""
        torch.manual_seed(random_seed)

        with torch.no_grad():
            generation_output = self.model.generate(
                input_ids=input_ids,
                generation_config=generation_config,
                return_dict_in_generate=True,
                output_scores=True,
                max_new_tokens=max_new_tokens
            )
        return generation_output

    def get_gene_probabilities(self, prompt_config, top_k=300, list_type='up', random_seed=138):
        """Return the known genes among the top-k most probable tokens at the
        first <up>/<down> position.

        Args:
            prompt_config: dictionary consumed by `create_prompt`
            top_k: how many of the most probable tokens to consider
            list_type: "up" / "down"
            random_seed: seed passed to `torch.manual_seed` before generation
        """
        prompt = self.create_prompt(prompt_config)
        assert list_type in ["up", "down"]

        if list_type == 'up':
            prompt += "<up>"

        print(prompt)
        # Generation config: see https://huggingface.co/blog/how-to-generate
        generation_config = GenerationConfig(temperature=0.8, num_beams=1, do_sample=True, top_p=None, top_k=3550,
                                             pad_token_id=self.tokenizer.pad_token_id, num_return_sequences=1)
        inputs = self.tokenizer(prompt, return_tensors="pt")
        input_ids = inputs["input_ids"].to(self.device)
        # Token id 3 is assumed to be the unknown-token id; the prompt must not contain it
        assert 3 not in input_ids[0]
        max_new_tokens = self.model.config.max_seq_len - len(input_ids[0])

        generation_output = self.generate_with_generation_config(input_ids=input_ids,
                                                                 generation_config=generation_config,
                                                                 max_new_tokens=max_new_tokens,
                                                                 random_seed=random_seed)
        # Locate the <up>/<down> tag in the generated sequence; the gene token
        # is expected at the next position
        id_4_gene_token = list(generation_output.sequences[0][len(input_ids[0]) - 1:]).index(
            self.tokenizer.convert_tokens_to_ids([f'<{list_type}>'])[0]) + 1
        print('This is the token index where the gene should be predicted:', id_4_gene_token)

        values, indices = torch.topk(generation_output["scores"][id_4_gene_token - 1].view(-1), k=top_k)
        indices_decoded = self.tokenizer.decode(indices, skip_special_tokens=True)
        # Split on double spaces to recover the individual decoded tokens
        indices_decoded_list = indices_decoded.split('  ')

        generated_genes = sorted(set(indices_decoded_list) & set(self.unique_genes_p3), key=indices_decoded_list.index)
        return generated_genes
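
    # Minimal usage sketch (illustrative, not canonical; assumes a CUDA device
    # and network access to the Hugging Face Hub):
    #   handler = EndpointHandler(device='cuda:0')
    #   up_genes = handler.get_gene_probabilities(
    #       {'instruction': 'age_group2diff2age_group', 'tissue': 'whole blood',
    #        'species': 'human', 'age': 70},
    #       top_k=300, list_type='up')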


class HFEndpointHandler:
    def __init__(self, path="insilicomedicine/precious3-gpt", device='cuda:1'):
    
        self.device = device
        self.model = AutoModel.from_pretrained(path, trust_remote_code=True).to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
        self.model.config.pad_token_id = self.tokenizer.pad_token_id
        self.model.config.bos_token_id = self.tokenizer.bos_token_id
        self.model.config.eos_token_id = self.tokenizer.eos_token_id
        
        unique_entities_p3 = pd.read_csv('https://huggingface.co/insilicomedicine/precious3-gpt/raw/main/all_entities_with_type.csv')
        self.unique_compounds_p3 = [i.strip() for i in unique_entities_p3[unique_entities_p3.type == 'compound'].entity.to_list()]
        self.unique_genes_p3 = [i.strip() for i in unique_entities_p3[unique_entities_p3.type == 'gene'].entity.to_list()]


    def create_prompt(self, prompt_config):
        """Assemble a P3GPT prompt string from a prompt-config dictionary."""
        prompt = "[BOS]"
        multi_modal_prefix = ''

        for k, v in prompt_config.items():
            if k == 'instruction':
                prompt += f'<{v}>' if isinstance(v, str) else "".join([f'<{v_i}>' for v_i in v])
            elif k in ('up', 'down'):
                if v:
                    v_str = v if isinstance(v, str) else " ".join(v)
                    prompt += f'{multi_modal_prefix}<{k}>{v_str} </{k}>'
            elif k == 'age':
                if isinstance(v, int):
                    if prompt_config['species'].strip() == 'human':
                        prompt += f'<{k}_individ>{v} </{k}_individ>'
                    elif prompt_config['species'].strip() == 'macaque':
                        # macaque age is bucketed into 20-unit bins
                        prompt += f'<{k}_individ>Macaca-{int(v / 20)} </{k}_individ>'
            else:
                if v:
                    prompt += f'<{k}>{v.strip()} </{k}>' if isinstance(v, str) else f'<{k}>{" ".join(v)} </{k}>'
                else:
                    prompt += f'<{k}></{k}>'
        return prompt

    def custom_generate(self,
                        input_ids,
                        device,
                        max_new_tokens,
                        mode,
                        temperature=0.8,
                        top_p=0.2, top_k=3550,
                        n_next_tokens=30, num_return_sequences=1, random_seed=138):
        """Token-by-token sampling that captures the top candidate tokens at the
        <drug>, <up> and <down> positions.

        Args:
            temperature: higher values give more randomness, lower more control
            top_p: probability threshold for nucleus (top-p) sampling
            top_k: if non-zero, drop logits outside the top-k candidates
            n_next_tokens: number of top candidates to record at the <drug>,
                <up> and <down> positions
        """
        torch.manual_seed(random_seed)

        # Generate sequences
        outputs = []
        next_token_compounds = []
        next_token_up_genes = [] 
        next_token_down_genes = [] 
    
        for _ in range(num_return_sequences):
            start_time = time.time()
            generated_sequence = []
            current_token = input_ids.clone()

            for _ in range(max_new_tokens):  # maximum length of the generated sequence
                # Forward pass; keep only the logits for the last position
                logits = self.model.forward(input_ids=current_token)[0][:, -1, :]

                # Apply temperature to the logits
                if temperature != 1.0:
                    logits = logits / temperature

                # Nucleus (top-p) filtering, optionally combined with top-k.
                # This corrects the original mask, which was applied in sorted
                # order and with inverted polarity.
                sorted_logits, sorted_indices = torch.sort(logits, descending=True)
                cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
                sorted_indices_to_remove = cumulative_probs > top_p
                # Shift right so the first token above the threshold is kept
                sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                sorted_indices_to_remove[..., 0] = False
                if top_k > 0:
                    sorted_indices_to_remove[..., top_k:] = True
                # Map the mask back to the original (unsorted) vocabulary order
                indices_to_remove = sorted_indices_to_remove.scatter(-1, sorted_indices, sorted_indices_to_remove)
                logits = logits.masked_fill(indices_to_remove, float("-inf"))

                probs = torch.softmax(logits, dim=-1)[0]

                # Record the top candidates right after the <drug> tag (compounds)
                if current_token[0][-1] == self.tokenizer.encode('<drug>')[0] and len(next_token_compounds) == 0:
                    next_token_compounds.append(torch.topk(probs, n_next_tokens).indices)

                # Record the top candidates right after the <up> tag (up-regulated genes)
                if current_token[0][-1] == self.tokenizer.encode('<up>')[0] and len(next_token_up_genes) == 0:
                    next_token_up_genes.append(torch.topk(probs, n_next_tokens).indices)

                # Record the top candidates right after the <down> tag (down-regulated genes)
                if current_token[0][-1] == self.tokenizer.encode('<down>')[0] and len(next_token_down_genes) == 0:
                    next_token_down_genes.append(torch.topk(probs, n_next_tokens).indices)

                # Sample the next token from the filtered distribution
                next_token = torch.multinomial(probs, num_samples=1).unsqueeze(0)

                # Append the sampled token to the generated sequence
                generated_sequence.append(next_token.item())

                # Stop generation once the end-of-sequence token is produced
                if next_token.item() == self.tokenizer.eos_token_id:
                    break

                # Extend the context for the next iteration
                current_token = torch.cat((current_token, next_token), dim=-1)
            print('Generation time, s:', time.time() - start_time)
            outputs.append(generated_sequence)
        
        # Map the captured token ids back to gene symbols, keeping only known genes
        processed_outputs = {"up": [], "down": []}
        if mode in ['meta2diff', 'meta2diff2compound']:
            for list_type, next_token_genes in [('up', next_token_up_genes), ('down', next_token_down_genes)]:
                predicted_genes = []
                for tokens in [self.tokenizer.convert_ids_to_tokens(j) for j in next_token_genes]:
                    generated_sample = [i.strip() for i in tokens]
                    predicted_genes.append(sorted(set(generated_sample) & set(self.unique_genes_p3),
                                                  key=generated_sample.index))
                processed_outputs[list_type] = predicted_genes
        else:
            processed_outputs = outputs
        
        # Map the captured compound token ids back to token strings
        predicted_compounds_ids = [self.tokenizer.convert_ids_to_tokens(j) for j in next_token_compounds]
        predicted_compounds = []
        for j in predicted_compounds_ids:
            predicted_compounds.append([i.strip() for i in j])
            
        return processed_outputs, predicted_compounds, random_seed


    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Args:
            data: payload with the prompt config ("inputs"), the generation
                mode ("mode") and the sampling parameters ("parameters").
        """
        
        data = data.copy()

        parameters = data.pop("parameters", None) or {}  # fall back to custom_generate defaults
        config_data = data.pop("inputs", None)
        mode = data.pop('mode', 'Not specified')

        prompt = self.create_prompt(config_data)
        if mode != "diff2compound":
            prompt += "<up>"
        
        inputs = self.tokenizer(prompt, return_tensors="pt")
        input_ids = inputs["input_ids"].to(self.device)

        max_new_tokens = self.model.config.max_seq_len - len(input_ids[0]) 
        try:
            generated_sequence, raw_next_token_generation, out_seed = self.custom_generate(
                input_ids=input_ids, max_new_tokens=max_new_tokens, mode=mode,
                device=self.device, **parameters)
            next_token_generation = [sorted(set(i) & set(self.unique_compounds_p3), key=i.index)
                                     for i in raw_next_token_generation]

            if mode == "meta2diff":
                outputs = {"up": generated_sequence['up'], "down": generated_sequence['down']}
                out = {"output": outputs, "mode": mode, "message": "Done!", "input": prompt, 'random_seed': out_seed}
            elif mode == "meta2diff2compound":
                outputs = {"up": generated_sequence['up'], "down": generated_sequence['down']}
                out = {
                "output": outputs, "compounds": next_token_generation, "raw_output": raw_next_token_generation, "mode": mode, 
                    "message": "Done!", "input": prompt, 'random_seed': out_seed}
            elif mode == "diff2compound":
                outputs = generated_sequence
                out = {
                "output": outputs, "compounds": next_token_generation, "raw_output": raw_next_token_generation, "mode": mode, 
                    "message": "Done!", "input": prompt, 'random_seed': out_seed}
            else:
                out = {"message": f"Specify one of the following modes: meta2diff, meta2diff2compound, diff2compound. Your mode is: {mode}"}

        except Exception as e:
            print(e)
            outputs, next_token_generation = [None], [None]
            out = {"output": outputs, "mode": mode, 'message': f"{e}", "input": prompt, 'random_seed': 138}

        return out
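
# Example request payload for HFEndpointHandler (illustrative values; the
# "parameters" keys must match custom_generate's keyword arguments):
#   handler = HFEndpointHandler(device='cuda:0')
#   result = handler({
#       "inputs": {"instruction": "disease2diff2disease",
#                  "tissue": "whole blood", "species": "human", "age": 70},
#       "mode": "meta2diff",
#       "parameters": {"temperature": 0.8, "top_p": 0.2, "top_k": 3550,
#                      "n_next_tokens": 30, "random_seed": 138},
#   })
#   # result["output"] -> {"up": [[...]], "down": [[...]]}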

class MMEndpointHandler:
    def __init__(self, path="insilicomedicine/precious3-gpt-multi-modal", device='cuda:3'):

        self.device = device
        self.path = path
        # Load the multimodal model and tokenizer from `path`
        self.model = Custom_MPTForCausalLM.from_pretrained(path, torch_dtype=torch.bfloat16).to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
        self.model.config.pad_token_id = self.tokenizer.pad_token_id
        self.model.config.bos_token_id = self.tokenizer.bos_token_id
        self.model.config.eos_token_id = self.tokenizer.eos_token_id
        unique_entities_p3 = pd.read_csv('https://huggingface.co/insilicomedicine/precious3-gpt/raw/main/all_entities_with_type.csv')
        self.unique_compounds_p3 = [i.strip() for i in unique_entities_p3[unique_entities_p3.type == 'compound'].entity.to_list()]
        self.unique_genes_p3 = [i.strip() for i in unique_entities_p3[unique_entities_p3.type == 'gene'].entity.to_list()]
        
        # Precomputed per-gene embedding tables used for the modality inputs
        self.emb_gpt_genes = pd.read_pickle('https://huggingface.co/insilicomedicine/precious3-gpt-multi-modal/resolve/main/multi-modal-data/emb_gpt_genes.pickle')
        self.emb_hgt_genes = pd.read_pickle('https://huggingface.co/insilicomedicine/precious3-gpt-multi-modal/resolve/main/multi-modal-data/emb_hgt_genes.pickle')


    def create_prompt(self, prompt_config):
        """Assemble a P3GPT prompt, prefixing the up/down lists with the modality
        placeholder tokens that receive the injected embeddings."""
        prompt = "[BOS]"

        # Three repeats of the four modality placeholder tokens
        multi_modal_prefix = '<modality0><modality1><modality2><modality3>' * 3

        for k, v in prompt_config.items():
            if k == 'instruction':
                prompt += f'<{v}>' if isinstance(v, str) else "".join([f'<{v_i}>' for v_i in v])
            elif k in ('up', 'down'):
                if v:
                    v_str = v if isinstance(v, str) else " ".join(v)
                    prompt += f'{multi_modal_prefix}<{k}>{v_str} </{k}>'
            elif k == 'age':
                if isinstance(v, int):
                    if prompt_config['species'].strip() == 'human':
                        prompt += f'<{k}_individ>{v} </{k}_individ>'
                    elif prompt_config['species'].strip() == 'macaque':
                        # macaque age is bucketed into 20-unit bins
                        prompt += f'<{k}_individ>Macaca-{int(v / 20)} </{k}_individ>'
            else:
                if v:
                    prompt += f'<{k}>{v.strip()} </{k}>' if isinstance(v, str) else f'<{k}>{" ".join(v)} </{k}>'
                else:
                    prompt += f'<{k}></{k}>'
        return prompt

    def custom_generate(self,
                        input_ids,
                        acc_embs_up_kg_mean,
                        acc_embs_down_kg_mean,
                        acc_embs_up_txt_mean,
                        acc_embs_down_txt_mean,
                        device,
                        max_new_tokens,
                        mode,
                        temperature=0.8,
                        top_p=0.2, top_k=3550,
                        n_next_tokens=50, num_return_sequences=1, random_seed=138):
        """Token-by-token sampling that injects multimodal embeddings and captures
        the top candidate tokens at the <drug>, <up> and <down> positions.

        Args:
            acc_embs_up_kg_mean / acc_embs_down_kg_mean: mean knowledge-graph
                (HGT) embedding of the up-/down-regulated genes, or None
            acc_embs_up_txt_mean / acc_embs_down_txt_mean: mean text (GPT)
                embedding of the up-/down-regulated genes, or None
            temperature: higher values give more randomness, lower more control
            top_p: probability threshold for nucleus (top-p) sampling
            top_k: if non-zero, drop logits outside the top-k candidates
            n_next_tokens: number of top candidates to record at the <drug>,
                <up> and <down> positions
        """
        torch.manual_seed(random_seed)

        # Wrap the precomputed modality embeddings as batch-1 tensors (None if absent)
        modality0_emb = torch.unsqueeze(torch.from_numpy(acc_embs_up_kg_mean), 0).to(device) if isinstance(acc_embs_up_kg_mean, np.ndarray) else None
        modality1_emb = torch.unsqueeze(torch.from_numpy(acc_embs_down_kg_mean), 0).to(device) if isinstance(acc_embs_down_kg_mean, np.ndarray) else None
        modality2_emb = torch.unsqueeze(torch.from_numpy(acc_embs_up_txt_mean), 0).to(device) if isinstance(acc_embs_up_txt_mean, np.ndarray) else None
        modality3_emb = torch.unsqueeze(torch.from_numpy(acc_embs_down_txt_mean), 0).to(device) if isinstance(acc_embs_down_txt_mean, np.ndarray) else None


        # Generate sequences
        outputs = []
        next_token_compounds = []
        next_token_up_genes = [] 
        next_token_down_genes = []

        for _ in range(num_return_sequences):
            start_time = time.time()
            generated_sequence = []
            current_token = input_ids.clone()

            for _ in range(max_new_tokens):  # maximum length of the generated sequence
                # Forward pass with modality embeddings; keep only the last-position logits
                logits = self.model.forward(
                    input_ids=current_token,
                    modality0_emb=modality0_emb,
                    modality0_token_id=self.tokenizer.encode('<modality0>')[0],  # 62191
                    modality1_emb=modality1_emb,
                    modality1_token_id=self.tokenizer.encode('<modality1>')[0],  # 62192
                    modality2_emb=modality2_emb,
                    modality2_token_id=self.tokenizer.encode('<modality2>')[0],  # 62193
                    modality3_emb=modality3_emb,
                    modality3_token_id=self.tokenizer.encode('<modality3>')[0],  # 62194
                )[0][:, -1, :]

                # Apply temperature to the logits
                if temperature != 1.0:
                    logits = logits / temperature

                # Nucleus (top-p) filtering, optionally combined with top-k.
                # This corrects the original mask, which was applied in sorted
                # order and with inverted polarity.
                sorted_logits, sorted_indices = torch.sort(logits, descending=True)
                cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
                sorted_indices_to_remove = cumulative_probs > top_p
                # Shift right so the first token above the threshold is kept
                sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
                sorted_indices_to_remove[..., 0] = False
                if top_k > 0:
                    sorted_indices_to_remove[..., top_k:] = True
                # Map the mask back to the original (unsorted) vocabulary order
                indices_to_remove = sorted_indices_to_remove.scatter(-1, sorted_indices, sorted_indices_to_remove)
                logits = logits.masked_fill(indices_to_remove, float("-inf"))

                probs = torch.softmax(logits, dim=-1)[0]

                # Record the top candidates right after the <drug> tag (compounds)
                if current_token[0][-1] == self.tokenizer.encode('<drug>')[0] and len(next_token_compounds) == 0:
                    next_token_compounds.append(torch.topk(probs, n_next_tokens).indices)

                # Record the top candidates right after the <up> tag (up-regulated genes)
                if current_token[0][-1] == self.tokenizer.encode('<up>')[0] and len(next_token_up_genes) == 0:
                    next_token_up_genes.append(torch.topk(probs, n_next_tokens).indices)

                # Record the top candidates right after the <down> tag (down-regulated genes)
                if current_token[0][-1] == self.tokenizer.encode('<down>')[0] and len(next_token_down_genes) == 0:
                    next_token_down_genes.append(torch.topk(probs, n_next_tokens).indices)

                # Sample the next token from the filtered distribution
                next_token = torch.multinomial(probs, num_samples=1).unsqueeze(0)

                # Append the sampled token to the generated sequence
                generated_sequence.append(next_token.item())

                # Stop generation once the end-of-sequence token is produced
                if next_token.item() == self.tokenizer.eos_token_id:
                    break

                # Extend the context for the next iteration
                current_token = torch.cat((current_token, next_token), dim=-1)
            print('Generation time, s:', time.time() - start_time)
            outputs.append(generated_sequence)
        
        # Map the captured token ids back to gene symbols, keeping only known genes
        processed_outputs = {"up": [], "down": []}
        if mode in ['meta2diff', 'meta2diff2compound']:
            for list_type, next_token_genes in [('up', next_token_up_genes), ('down', next_token_down_genes)]:
                predicted_genes = []
                for tokens in [self.tokenizer.convert_ids_to_tokens(j) for j in next_token_genes]:
                    generated_sample = [i.strip() for i in tokens]
                    predicted_genes.append(sorted(set(generated_sample) & set(self.unique_genes_p3),
                                                  key=generated_sample.index))
                processed_outputs[list_type] = predicted_genes
        else:
            processed_outputs = outputs
        
        # Map the captured compound token ids back to token strings
        predicted_compounds_ids = [self.tokenizer.convert_ids_to_tokens(j) for j in next_token_compounds]
        predicted_compounds = []
        for j in predicted_compounds_ids:
            predicted_compounds.append([i.strip() for i in j])
            
        return processed_outputs, predicted_compounds, random_seed


    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Args:
            data: payload with the prompt config ("inputs"), the generation
                mode ("mode") and the sampling parameters ("parameters").
        """
        data = data.copy()
        parameters = data.pop("parameters", None) or {}  # fall back to custom_generate defaults
        config_data = data.pop("inputs", None)
        mode = data.pop('mode', 'Not specified')

        prompt = self.create_prompt(config_data)
        if mode != "diff2compound":
            prompt += "<up>"
        
        inputs = self.tokenizer(prompt, return_tensors="pt")
        input_ids = inputs["input_ids"].to(self.device)

        max_new_tokens = self.model.config.max_seq_len - len(input_ids[0]) 
        try:
            # Average the precomputed embeddings of the supplied up/down genes;
            # missing keys or genes simply yield None embeddings
            if set(["up", "down"]) & set(config_data.keys()):
                acc_embs_up1 = []
                acc_embs_up2 = []
                for gs in config_data.get('up', []):
                    try:
                        acc_embs_up1.append(self.emb_hgt_genes[self.emb_hgt_genes.gene_symbol == gs].embs.values[0])
                        acc_embs_up2.append(self.emb_gpt_genes[self.emb_gpt_genes.gene_symbol == gs].embs.values[0])
                    except Exception:
                        pass  # skip genes without precomputed embeddings
                acc_embs_up1_mean = np.array(acc_embs_up1).mean(0) if acc_embs_up1 else None
                acc_embs_up2_mean = np.array(acc_embs_up2).mean(0) if acc_embs_up2 else None

                acc_embs_down1 = []
                acc_embs_down2 = []
                for gs in config_data.get('down', []):
                    try:
                        acc_embs_down1.append(self.emb_hgt_genes[self.emb_hgt_genes.gene_symbol == gs].embs.values[0])
                        acc_embs_down2.append(self.emb_gpt_genes[self.emb_gpt_genes.gene_symbol == gs].embs.values[0])
                    except Exception:
                        pass  # skip genes without precomputed embeddings
                acc_embs_down1_mean = np.array(acc_embs_down1).mean(0) if acc_embs_down1 else None
                acc_embs_down2_mean = np.array(acc_embs_down2).mean(0) if acc_embs_down2 else None
            else:
                acc_embs_up1_mean, acc_embs_up2_mean, acc_embs_down1_mean, acc_embs_down2_mean = None, None, None, None

            generated_sequence, raw_next_token_generation, out_seed = self.custom_generate(
                input_ids=input_ids,
                acc_embs_up_kg_mean=acc_embs_up1_mean,
                acc_embs_down_kg_mean=acc_embs_down1_mean,
                acc_embs_up_txt_mean=acc_embs_up2_mean,
                acc_embs_down_txt_mean=acc_embs_down2_mean,
                max_new_tokens=max_new_tokens, mode=mode,
                device=self.device, **parameters)
            next_token_generation = [sorted(set(i) & set(self.unique_compounds_p3), key=i.index)
                                     for i in raw_next_token_generation]

            if mode == "meta2diff":
                outputs = {"up": generated_sequence['up'], "down": generated_sequence['down']}
                out = {"output": outputs, "mode": mode, "message": "Done!", "input": prompt, 'random_seed': out_seed}
            elif mode == "meta2diff2compound":
                outputs = {"up": generated_sequence['up'], "down": generated_sequence['down']}
                out = {
                "output": outputs, "compounds": next_token_generation, "raw_output": raw_next_token_generation, "mode": mode, 
                    "message": "Done!", "input": prompt, 'random_seed': out_seed}
            elif mode == "diff2compound":
                outputs = generated_sequence
                out = {
                "output": outputs, "compounds": next_token_generation, "raw_output": raw_next_token_generation, "mode": mode, 
                    "message": "Done!", "input": prompt, 'random_seed': out_seed}
            else:
                out = {"message": f"Specify one of the following modes: meta2diff, meta2diff2compound, diff2compound. Your mode is: {mode}"}

        except Exception as e:
            print(e)
            outputs, next_token_generation = [None], [None]
            out = {"output": outputs, "mode": mode, 'message': f"{e}", "input": prompt, 'random_seed': 138}

        return out
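
# The multimodal handler takes the same payload shape; when "up"/"down" gene
# lists are supplied, their mean HGT and GPT embeddings are injected through
# the <modality0>..<modality3> placeholder tokens. A hypothetical request
# (instruction value is illustrative and must exist in the tokenizer vocabulary):
#   mm_handler = MMEndpointHandler(device='cuda:0')
#   result = mm_handler({
#       "inputs": {"instruction": "diff2compound",
#                  "up": ["TP53", "EGFR"], "down": ["BRCA1"]},
#       "mode": "diff2compound",
#       "parameters": {"temperature": 0.8, "top_p": 0.2, "top_k": 3550,
#                      "n_next_tokens": 50, "random_seed": 138},
#   })
#   # result["compounds"] -> candidate compound tokens ranked by probability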

def main():
    pass

if __name__ == "__main__":
    main()