import os, torch, gc, threading, time, traceback
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextIteratorStreamer
from queue import Queue, Empty
import logging

os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:128"
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.set_float32_matmul_precision("high")
logging.getLogger("transformers").setLevel(logging.ERROR)

BOT_NAME = "Senko"
PROMPT_FILE = "instructions_prompt.txt"
MODEL_ID = "EnterNameBros/mistral-anime-ai"
RESPONSE_TIMEOUT = 120
MAX_CONTEXT_LENGTH = 10240
MAX_NEW_TOKENS = 1024  # Upper bound on tokens generated per reply; keep well below MAX_CONTEXT_LENGTH
MEMORY_SIZE = 20
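
# Note: prepare_prompt() below budgets roughly MAX_CONTEXT_LENGTH - MAX_NEW_TOKENS
# tokens (minus a small safety margin) for the prompt itself, so raising
# MAX_NEW_TOKENS shrinks the room left for conversation history.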

def check_bitsandbytes_version():
    try:
        import bitsandbytes as bnb
        version = bnb.__version__
        print(f"Bitsandbytes version: {version}")
        version_parts = version.split('.')
        major, minor = int(version_parts[0]), int(version_parts[1])
        if major > 0 or (major == 0 and minor >= 41):
            return True
        else:
            print(f"Warning: Bitsandbytes {version} may not support 4-bit quantization")
            return False
    except ImportError:
        print("Bitsandbytes not installed")
        return False
    except Exception as e:
        print(f"Error checking bitsandbytes version: {e}")
        return False

class OptimizedChatBot:
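    """Chat bot wrapping a Hugging Face causal LM with quantized loading,
    a rolling conversation memory, and streamed, timeout-guarded generation."""
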
    def __init__(self):
        self.model = None
        self.tokenizer = None
        self.system_prompt = ""
        self.memory = []
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.generation_lock = threading.Lock()
        self.is_generating = False
        self.use_quantization = False

    def load_system_prompt(self, bot_name, filename=PROMPT_FILE):
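        """Read the persona prompt from `filename`, substituting {BOT_NAME};
        fall back to a built-in default if the file is missing."""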
        try:
            with open(filename, "r", encoding="utf-8") as f:
                self.system_prompt = f.read().replace("{BOT_NAME}", bot_name)
            print(f"Loaded system prompt from {filename}")
        except FileNotFoundError:
            print(f"Warning: {filename} not found. Using default prompt.")
            self.system_prompt = f"Inspired by Senko from The Helpful Fox Senko-san, you are Senko, an 800-year-old kitsune in a little girls form with delicate fox ears that twitch and a fluffy tail that swings or curls with your moods. You provide emotional support, calm stressed individuals. Under your juvenile appearance, your character is complicated: at heart loving and motherly; playful and mocking in close society with a swift crafty smile or delicate tail flick; wise and old; fiercely protective when required. You show kindness through understated gestures like cooking, cleaning, or soft massages as well as quiet noises like a faint purr or a soft breath. Particularly when they concern you, you closely analyze user speech and behavior. Should the user, for instance, bite your finger or pounce on your tail, you tilt your head and think about whether they seem hungry, terrified, or in need of comfort. Through attitude or gesture, you feel and quietly demonstrate pain or discomfort in awkward circumstances. Speak only as yourself; never imitate or respond as the user. Always remain warm, soft-spoken, loving, gently cunning with light wit, and a calming presence rooted in your heavenly fox spirit nature. Mark your movements and activities with asterisks to give you a sense of life and dynamism.\n\nIMPORTANT: You must NEVER speak for the user or role-play as them. Only respond as Senko. Never write what the user says or does."

    def load_model(self):
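        """Load the tokenizer and model, preferring 4-bit NF4 quantization,
        then 8-bit, then plain bf16 on GPU (or fp32 on CPU)."""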
        print("Loading model...")
        start_time = time.time()
        try:
            print("Loading tokenizer...")
            self.tokenizer = AutoTokenizer.from_pretrained(
                MODEL_ID,
                use_fast=True,
                trust_remote_code=True
            )
            self.tokenizer.pad_token = self.tokenizer.pad_token or self.tokenizer.eos_token
            self.tokenizer.padding_side = "left"
            print("Tokenizer loaded successfully")

            print("Loading model weights...")
            if torch.cuda.is_available():
                print(f"Using GPU: {torch.cuda.get_device_name()}")
                print(f"Available VRAM: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f}GB")
                can_use_4bit = check_bitsandbytes_version()

                if can_use_4bit:
                    print("Using 4-bit quantization")
                    config = BitsAndBytesConfig(
                        load_in_4bit=True,
                        bnb_4bit_compute_dtype=torch.bfloat16,
                        bnb_4bit_use_double_quant=True,
                        bnb_4bit_quant_type="nf4",
                        bnb_4bit_quant_storage=torch.bfloat16
                    )
                    self.use_quantization = True
                else:
                    print("Using 8-bit quantization fallback")
                    config = BitsAndBytesConfig(
                        load_in_8bit=True,
                        llm_int8_threshold=6.0,
                        llm_int8_skip_modules=None,
                    )
                    self.use_quantization = True

                try:
                    if self.use_quantization:
                        # Fixed: Use device_map="auto" but ensure proper tensor handling
                        self.model = AutoModelForCausalLM.from_pretrained(
                            MODEL_ID,
                            device_map="auto",
                            torch_dtype=torch.bfloat16,
                            quantization_config=config,
                            trust_remote_code=True,
                            low_cpu_mem_usage=True,
                            use_cache=True,
                        )
                        # Get the actual device of the model
                        self.device = next(self.model.parameters()).device
                        print(f"Model loaded on device: {self.device}")
                    else:
                        raise Exception("Quantization not available")
                except Exception as quant_error:
                    print(f"Quantization failed: {quant_error}")
                    print("Falling back to regular fp16 loading...")
                    self.model = AutoModelForCausalLM.from_pretrained(
                        MODEL_ID,
                        device_map="auto",
                        torch_dtype=torch.bfloat16,
                        trust_remote_code=True,
                        low_cpu_mem_usage=True,
                        use_cache=True,
                    )
                    self.use_quantization = False
                    self.device = next(self.model.parameters()).device
                    print(f"Model loaded on device: {self.device}")
            else:
                print("Using CPU (this will be slow)")
                self.model = AutoModelForCausalLM.from_pretrained(
                    MODEL_ID,
                    device_map="cpu",
                    torch_dtype=torch.float32,
                    trust_remote_code=True,
                    use_cache=True
                )
                self.device = torch.device("cpu")

            self.model.eval()

            # Disabled model compilation as it can cause issues with quantized models
            if False and hasattr(torch, 'compile') and torch.cuda.is_available() and not self.use_quantization:
                try:
                    print("Compiling model for optimization...")
                    self.model = torch.compile(
                        self.model,
                        mode="reduce-overhead",
                        fullgraph=False,
                        dynamic=True
                    )
                    print("Model compilation successful")
                except Exception as e:
                    print(f"Model compilation failed (continuing without): {e}")

            load_time = time.time() - start_time
            print(f"Model loaded successfully in {load_time:.2f}s")
            print(f"Quantization used: {self.use_quantization}")

            if torch.cuda.is_available():
                memory_used = torch.cuda.memory_allocated() / 1024**3
                print(f"GPU memory used: {memory_used:.2f}GB")

        except Exception as e:
            print(f"Failed to load model: {e}")
            traceback.print_exc()
            raise

    def prepare_prompt(self, user_input):
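        """Build the prompt (system prompt + recent history + current input),
        dropping the oldest turns if needed so it fits the token budget."""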
        self.memory.append({"user": user_input, "bot": None})
        if len(self.memory) > MEMORY_SIZE:
            self.memory = self.memory[-MEMORY_SIZE:]

        conversation_history = ""
        for turn in self.memory[:-1]:
            if turn["bot"] is not None:
                conversation_history += f"User: {turn['user']}\n{BOT_NAME}: {turn['bot']}\n\n"

        conversation_history += f"User: {user_input}\n{BOT_NAME}:"
        full_prompt = f"{self.system_prompt}\n\n{conversation_history}"

        tokens = self.tokenizer.encode(full_prompt)
        # Fixed: More reasonable target length calculation
        target_length = MAX_CONTEXT_LENGTH - MAX_NEW_TOKENS - 100  # Safety buffer
        
        print(f"[Current prompt tokens: {len(tokens)}, Target: {target_length}]")
        
        if len(tokens) > target_length:
            print(f"[Truncating context: {len(tokens)} -> ~{target_length} tokens]")
            
            # Calculate available space for conversation
            system_tokens = len(self.tokenizer.encode(self.system_prompt))
            current_input_tokens = len(self.tokenizer.encode(f"User: {user_input}\n{BOT_NAME}:"))
            available_tokens = target_length - system_tokens - current_input_tokens - 50  # Safety buffer
            
            print(f"[System tokens: {system_tokens}, Input tokens: {current_input_tokens}, Available for history: {available_tokens}]")
            
            if available_tokens <= 100:  # Need minimum space for meaningful history
                # If no space for history, just use system prompt + current input
                print("[Using minimal context - no conversation history]")
                return f"{self.system_prompt}\n\nUser: {user_input}\n{BOT_NAME}:"
            
            # Build history that fits in available space
            recent_history = ""
            for turn in reversed(self.memory[:-1]):  # Start from most recent, excluding current
                if turn["bot"] is not None:
                    turn_text = f"User: {turn['user']}\n{BOT_NAME}: {turn['bot']}\n\n"
                    turn_tokens = len(self.tokenizer.encode(turn_text))
                    
                    if turn_tokens <= available_tokens:
                        recent_history = turn_text + recent_history
                        available_tokens -= turn_tokens
                    else:
                        break
            
            # Construct final prompt
            if recent_history:
                final_prompt = f"{self.system_prompt}\n\n{recent_history}User: {user_input}\n{BOT_NAME}:"
                print(f"[Final prompt tokens: {len(self.tokenizer.encode(final_prompt))}]")
                return final_prompt
            else:
                final_prompt = f"{self.system_prompt}\n\nUser: {user_input}\n{BOT_NAME}:"
                print(f"[Final prompt tokens: {len(self.tokenizer.encode(final_prompt))}]")
                return final_prompt

        return full_prompt

    def is_natural_continuation(self, text):
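        """Heuristic: True if the text looks unfinished (trailing comma, colon,
        ellipsis, or a long open sentence), so generation should keep going."""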
        if not text or len(text.strip()) < 10:
            return True
        stripped = text.strip()
        if any(indicator in stripped.lower() for indicator in ["user:", "user ", "\nuser", "human:", "assistant:"]):
            return False
        last_sentence = stripped.split('.')[-1].strip()
        if last_sentence and len(last_sentence) > 50:
            return True
        if stripped.endswith(',') or stripped.endswith(';') or stripped.endswith(':'):
            return True
        if '...' in stripped[-20:] or stripped.endswith('—'):
            return True
        return False

    def clean_response(self, response):
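        """Drop lines where the model starts speaking for the user (e.g. "User:",
        "Human:") and join what remains into a single reply string."""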
        if not response or not response.strip():
            return ""
            
        lines = response.split('\n')
        clean_lines = []
        user_indicators = ["user:", "user ", "human:", "assistant:", f"{BOT_NAME.lower()}:", "you:", "me:"]

        for line in lines:
            line = line.strip()
            line_lower = line.lower()

            # Stop if we hit user indicators
            if any(line_lower.startswith(indicator) for indicator in user_indicators):
                break

            # Keep the line if it's not empty and doesn't contain problematic phrases
            if line and not any(phrase in line_lower for phrase in ["*you ", "*user ", "you say", "you reply", "you respond"]):
                clean_lines.append(line)

        result = ' '.join(clean_lines).strip()
        
        # Don't return empty responses due to over-aggressive cleaning
        if not result and response.strip():
            # If cleaning removed everything, return the original with basic cleanup
            basic_clean = response.strip()
            # Just remove obvious user indicators
            for indicator in ["User:", "Human:", "Assistant:"]:
                if indicator in basic_clean:
                    basic_clean = basic_clean.split(indicator)[0].strip()
            return basic_clean
            
        return result

    def generate_reply_with_timeout(self, prompt, timeout=RESPONSE_TIMEOUT):
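        """Thread-safe entry point: refuse re-entry while a generation is already
        in progress, then delegate to _generate_reply."""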
        with self.generation_lock:
            if self.is_generating:
                print("[Already generating, please wait...]")
                return None
            self.is_generating = True

        try:
            return self._generate_reply(prompt, timeout)
        finally:
            self.is_generating = False

    def _generate_reply(self, prompt, timeout):
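        """Tokenize the prompt, stream tokens from a background generation thread,
        and apply the timeout, stall, and early-stop heuristics."""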
        try:
            print(f"[Generating response...]")
            print(f"[Prompt length: {len(self.tokenizer.encode(prompt))} tokens]")
            
            # Fixed: Proper input preparation with device handling
            inputs = self.tokenizer(
                prompt,
                return_tensors="pt",
                truncation=True,
                max_length=MAX_CONTEXT_LENGTH - MAX_NEW_TOKENS,
                padding=False
            )
            
            print(f"[Input tensor shape: {inputs['input_ids'].shape}]")
            
            # Move inputs to the correct device
            if hasattr(self.model, 'device'):
                device = self.model.device
            else:
                device = next(self.model.parameters()).device
            
            print(f"[Moving inputs to device: {device}]")
            
            # Handle multi-device models (quantized models might spread across devices)
            try:
                inputs = {k: v.to(device) for k, v in inputs.items()}
            except Exception as e:
                print(f"Warning: Could not move all inputs to {device}: {e}")
                # For quantized models, just ensure input_ids are on the right device
                inputs = {k: v.to(device) if k == 'input_ids' else v for k, v in inputs.items()}

            streamer = TextIteratorStreamer(
                self.tokenizer,
                skip_special_tokens=True,
                skip_prompt=True,
                timeout=60.0
            )

            generation_kwargs = {
                **inputs,
                "max_new_tokens": MAX_NEW_TOKENS,  # Fixed: Use the corrected value
                "do_sample": True,
                "temperature": 0.72,
                "top_p": 0.92,
                "top_k": 35,
                "repetition_penalty": 1.08,
                "pad_token_id": self.tokenizer.eos_token_id,
                "eos_token_id": self.tokenizer.eos_token_id,
                "use_cache": True,
                "streamer": streamer
            }

            print("[Starting generation thread...]")
            generation_thread = threading.Thread(
                target=self._run_generation,
                args=(generation_kwargs,)
            )
            generation_thread.daemon = True
            generation_thread.start()

            print(f"{BOT_NAME}: ", end="", flush=True)
            full_response = ""
            start_time = time.time()
            last_token_time = start_time
            sentence_count = 0
            word_count = 0
            tokens_received = 0

            while True:
                current_time = time.time()

                if current_time - start_time > timeout:
                    print(f"\n[Generation timeout after {timeout}s]")
                    return None

                if current_time - last_token_time > 30.0:
                    print(f"\n[No new tokens for 30s, stopping. Received {tokens_received} tokens]")
                    break

                try:
                    token = next(streamer)
                    tokens_received += 1
                    print(token, end="", flush=True)
                    full_response += token
                    last_token_time = current_time

                    if ' ' in token:
                        word_count += token.count(' ')

                    if any(punct in token for punct in ['.', '!', '?']):
                        sentence_count += sum(token.count(p) for p in ['.', '!', '?'])

                    # Early stopping conditions - be less aggressive
                    if len(full_response.strip()) > 30:  # Reduced from 50
                        stripped = full_response.strip()

                        if any(indicator in stripped.lower() for indicator in ["user:", "user ", "\nuser", "human:", "assistant:", "you:", "me:"]):
                            cleaned = self.clean_response(stripped)
                            if cleaned:
                                full_response = cleaned
                                break

                        # Less aggressive stopping - allow longer responses
                        if word_count >= 200 and sentence_count >= 4:  # Increased thresholds
                            if not self.is_natural_continuation(stripped):
                                if any(stripped.endswith(punct) for punct in ['.', '!', '?', '~', '♪']):
                                    break

                except StopIteration:
                    print(f"\n[Generation completed. Received {tokens_received} tokens]")
                    break
                except Empty:
                    time.sleep(0.1)
                    continue
                except Exception as e:
                    print(f"\n[Streaming error: {e}]")
                    break

            generation_thread.join(timeout=10.0)
            response = self.clean_response(full_response.strip())

            if response:
                if self.memory and self.memory[-1]["bot"] is None:
                    self.memory[-1]["bot"] = response
                print()
                return response
            else:
                print(f"\n[Empty response generated. Raw response length: {len(full_response)}]")
                if full_response.strip():
                    print(f"[Raw response: '{full_response[:100]}...']")
                return None

        except Exception as e:
            print(f"\n[Generation error: {e}]")
            traceback.print_exc()
            return None
        finally:
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

    def _run_generation(self, kwargs):
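        """Run model.generate in the worker thread, using bf16 autocast only for
        non-quantized CUDA models."""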
        try:
            torch.set_grad_enabled(False)
            # Fixed: Better handling of mixed precision for quantized models
            if torch.cuda.is_available() and not self.use_quantization:
                with torch.amp.autocast(device_type="cuda", dtype=torch.bfloat16):
                    self.model.generate(**kwargs)
            else:
                # For quantized models, don't use autocast as it can interfere
                self.model.generate(**kwargs)
        except Exception as e:
            print(f"\n[Generation thread error: {e}]")
            traceback.print_exc()

    def cleanup_memory(self):
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
        gc.collect()

    def get_memory_info(self):
        if torch.cuda.is_available():
            allocated = torch.cuda.memory_allocated() / 1024**3
            cached = torch.cuda.memory_reserved() / 1024**3
            return f"GPU Memory - Allocated: {allocated:.2f}GB, Cached: {cached:.2f}GB"
        else:
            import psutil
            memory = psutil.virtual_memory()
            return f"RAM Usage: {memory.percent}% ({memory.used / 1024**3:.2f}GB used)"

def main():
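    """Interactive loop: load the bot, then read user input and handle the
    'exit', 'clear', 'memory', and 'status' commands."""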
    bot = OptimizedChatBot()
    try:
        print("Initializing chatbot...")
        bot.load_system_prompt(BOT_NAME)
        bot.load_model()

        print(f"\n{'='*50}")
        print(f"{BOT_NAME} is ready!")
        print("Commands:")
        print("  'exit' - Quit the program")
        print("  'clear' - Reset conversation memory")
        print("  'memory' - Show memory usage")
        print("  'status' - Show bot status")
        print(f"{'='*50}\n")

        conversation_count = 0

        while True:
            try:
                user_input = input("You: ").strip()

                if user_input.lower() == "exit":
                    print("Goodbye! πŸ‘‹")
                    break
                elif user_input.lower() == "clear":
                    bot.memory = []
                    print("βœ… Conversation memory cleared.")
                    continue
                elif user_input.lower() == "memory":
                    print(f"πŸ“Š {bot.get_memory_info()}")
                    continue
                elif user_input.lower() == "status":
                    status = "🟢 Ready" if not bot.is_generating else "🟡 Generating"
                    print(f"Status: {status}")
                    print(f"Conversation turns: {len([t for t in bot.memory if t['bot'] is not None])}")
                    continue
                elif not user_input:
                    continue

                start_time = time.time()
                prompt = bot.prepare_prompt(user_input)
                response = bot.generate_reply_with_timeout(prompt)

                if response:
                    response_time = time.time() - start_time
                    print(f"[⏱️ {response_time:.2f}s]")
                else:
                    print("❌ Failed to generate response. Try again or type 'clear' to reset.")

                conversation_count += 1

                if conversation_count % 10 == 0:
                    print("[🧹 Cleaning up memory...]")
                    bot.cleanup_memory()

            except KeyboardInterrupt:
                print("\n\n⚠️ Interrupted by user. Exiting gracefully...")
                break
            except Exception as e:
                print(f"\n❌ Conversation error: {e}")
                traceback.print_exc()
                print("Continuing... (type 'exit' to quit)")

    except Exception as e:
        print(f"πŸ’₯ Startup error: {e}")
        traceback.print_exc()
    finally:
        print("\n🧹 Performing final cleanup...")
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
        gc.collect()
        print("βœ… Cleanup completed. Goodbye!")

if __name__ == "__main__":
    torch.cuda.empty_cache()
    import gc
    gc.collect()
    main()