import chess
import chess.engine
import numpy as np
import tensorflow as tf
import time
import os
import datetime
import shutil  # For zip creation
from google.colab import files  # For download trigger

# --- 1. Neural Network (Policy and Value Network) ---
class PolicyValueNetwork(tf.keras.Model):
    def __init__(self, num_moves):
        super(PolicyValueNetwork, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(32, 3, activation='relu', padding='same')
        self.flatten = tf.keras.layers.Flatten()
        self.dense_policy = tf.keras.layers.Dense(num_moves, activation='softmax', name='policy_head')
        self.dense_value = tf.keras.layers.Dense(1, activation='tanh', name='value_head')

    def call(self, inputs):
        x = self.conv1(inputs)
        x = self.flatten(x)
        policy = self.dense_policy(x)
        value = self.dense_value(x)
        return policy, value

# --- 2. Move Encoding/Decoding (Correct and Deterministic Implementation) ---
NUM_POSSIBLE_MOVES = 4672  # Correct value based on deterministic encoding
NUM_INPUT_PLANES = 12

# Load model weights
policy_value_net = PolicyValueNetwork(NUM_POSSIBLE_MOVES)

# Dummy input to build the network (creates the layer variables before loading weights)
dummy_input = tf.random.normal((1, 8, 8, NUM_INPUT_PLANES))
policy, value = policy_value_net(dummy_input)

# Load the weights (replace 'your_model.weights.h5' with your actual file)
try:
    model_path = "/content/models_colab/StockZero-2025-03-24-1727.weights.h5"
    policy_value_net.load_weights(model_path)
    print(f"Model weights loaded successfully from '{model_path}'")
except Exception as e:
    print(f"Error loading weights: {e}")

# --- Create output directory and set output paths ---
OUTPUT_DIR = "/content/converted_models"
os.makedirs(OUTPUT_DIR, exist_ok=True)  # Create the folder if it does not exist

SAVED_MODEL_DIR = os.path.join(OUTPUT_DIR, "saved_model")
KERAS_MODEL_PATH = os.path.join(OUTPUT_DIR, "model.keras")
H5_MODEL_PATH = os.path.join(OUTPUT_DIR, "model.weights.h5")  # Keras 3 requires weight files to end in '.weights.h5'
PYTORCH_MODEL_PATH = os.path.join(OUTPUT_DIR, "pytorch_model.pth")
PYTORCH_FULL_MODEL_PATH = os.path.join(OUTPUT_DIR, "pytorch_full_model.pth")
ONNX_MODEL_PATH = os.path.join(OUTPUT_DIR, "model.onnx")
TFLITE_MODEL_PATH = os.path.join(OUTPUT_DIR, "model.tflite")
BIN_FILE_PATH = os.path.join(OUTPUT_DIR, "model_weights.bin")
NUMPY_FILE_PATH = os.path.join(OUTPUT_DIR, "model_weights.npz")

# --- 1. Keras/TensorFlow (SavedModel format) ---
try:
    tf.saved_model.save(policy_value_net, SAVED_MODEL_DIR)
    print(f"Model saved as SavedModel to '{SAVED_MODEL_DIR}'")
except Exception as e:
    print(f"Error saving model as SavedModel: {e}")

# --- 2. Keras .keras format (weights + architecture) ---
try:
    policy_value_net.save(KERAS_MODEL_PATH)
    print(f"Model saved as Keras .keras format to '{KERAS_MODEL_PATH}'")
except Exception as e:
    print(f"Error saving as .keras format: {e}")

# --- 3. Keras/TensorFlow (.h5 - weights only) ---
try:
    policy_value_net.save_weights(H5_MODEL_PATH)
    print(f"Model weights saved as .h5 to '{H5_MODEL_PATH}'")
except Exception as e:
    print(f"Error saving model weights as .h5: {e}")
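# --- Optional: reload check for the SavedModel export ---
# A minimal sketch, assuming the SavedModel save above succeeded; it only lists
# the restored signatures rather than asserting anything about the outputs.
try:
    reloaded = tf.saved_model.load(SAVED_MODEL_DIR)
    print("SavedModel reload check, available signatures:", list(reloaded.signatures.keys()))
except Exception as e:
    print(f"SavedModel reload check skipped: {e}")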
# --- 4. PyTorch ---
import torch
import torch.nn as nn

class PyTorchPolicyValueNetwork(nn.Module):
    def __init__(self, num_moves):
        super(PyTorchPolicyValueNetwork, self).__init__()
        self.conv1 = nn.Conv2d(12, 32, kernel_size=3, padding=1)  # 12 input channels for the chess planes
        self.relu = nn.ReLU()
        self.flatten = nn.Flatten()
        self.dense_policy = nn.Linear(8 * 8 * 32, num_moves)  # Flattened conv output is 8*8*32, matching the Keras layer
        self.softmax = nn.Softmax(dim=1)
        self.dense_value = nn.Linear(8 * 8 * 32, 1)
        self.tanh = nn.Tanh()

    def forward(self, x):
        x = self.relu(self.conv1(x))
        x = self.flatten(x)
        policy = self.softmax(self.dense_policy(x))
        value = self.tanh(self.dense_value(x))
        return policy, value

def keras_dense_kernel_to_torch(kernel):
    # Keras flattens the conv output as (H, W, C) while PyTorch flattens it as
    # (C, H, W), so the input axis of the dense kernel must be reordered before
    # transposing to PyTorch's (out_features, in_features) layout.
    k = kernel.numpy().reshape(8, 8, 32, -1).transpose(2, 0, 1, 3).reshape(8 * 8 * 32, -1)
    return torch.tensor(k.transpose(), dtype=torch.float32)

try:
    pytorch_model = PyTorchPolicyValueNetwork(NUM_POSSIBLE_MOVES)

    # Get Keras layers
    keras_conv1 = policy_value_net.conv1
    keras_dense_policy = policy_value_net.dense_policy
    keras_dense_value = policy_value_net.dense_value

    # Transfer weights from Keras to PyTorch.
    # Conv kernels: Keras (H, W, in, out) -> PyTorch (out, in, H, W).
    pytorch_model.conv1.weight = torch.nn.Parameter(torch.tensor(keras_conv1.kernel.numpy().transpose(3, 2, 0, 1), dtype=torch.float32))
    pytorch_model.conv1.bias = torch.nn.Parameter(torch.tensor(keras_conv1.bias.numpy(), dtype=torch.float32))
    # Dense kernels: reorder the flattened input axis (see helper above), then transpose.
    pytorch_model.dense_policy.weight = torch.nn.Parameter(keras_dense_kernel_to_torch(keras_dense_policy.kernel))
    pytorch_model.dense_policy.bias = torch.nn.Parameter(torch.tensor(keras_dense_policy.bias.numpy(), dtype=torch.float32))
    pytorch_model.dense_value.weight = torch.nn.Parameter(keras_dense_kernel_to_torch(keras_dense_value.kernel))
    pytorch_model.dense_value.bias = torch.nn.Parameter(torch.tensor(keras_dense_value.bias.numpy(), dtype=torch.float32))

    torch.save(pytorch_model.state_dict(), PYTORCH_MODEL_PATH)
    print(f"PyTorch model weights saved to '{PYTORCH_MODEL_PATH}'")
    torch.save(pytorch_model, PYTORCH_FULL_MODEL_PATH)  # Save full model (pickled module)
    print(f"PyTorch model saved as '{PYTORCH_FULL_MODEL_PATH}'")
except Exception as e:
    print(f"Error during PyTorch conversion: {e}")

# --- 5. ONNX ---
import tf2onnx

try:
    spec = (tf.TensorSpec((None, 8, 8, 12), tf.float32, name="input"),)
    onnx_model, _ = tf2onnx.convert.from_keras(policy_value_net, input_signature=spec)
    with open(ONNX_MODEL_PATH, "wb") as f:
        f.write(onnx_model.SerializeToString())
    print(f"Model saved as ONNX to '{ONNX_MODEL_PATH}'")
except Exception as e:
    print(f"Error saving model as ONNX: {e}")

# --- 6. TensorFlow Lite ---
try:
    converter = tf.lite.TFLiteConverter.from_keras_model(policy_value_net)
    tflite_model = converter.convert()
    with open(TFLITE_MODEL_PATH, 'wb') as f:
        f.write(tflite_model)
    print(f"Model saved as TFLite to '{TFLITE_MODEL_PATH}'")
except Exception as e:
    print(f"Error converting to TFLite: {e}")

# --- 7. Binary (.bin) format (custom implementation) ---
try:
    with open(BIN_FILE_PATH, 'wb') as f:
        for layer in policy_value_net.layers:
            for weight in layer.weights:
                weight_arr = weight.numpy()
                f.write(weight_arr.tobytes())
    print(f"Model weights saved as .bin to '{BIN_FILE_PATH}'")
except Exception as e:
    print(f"Error saving model weights as .bin: {e}")

# --- 8. NumPy arrays (.npz) format ---
try:
    all_weights = {}
    for layer in policy_value_net.layers:
        for i, weight in enumerate(layer.weights):
            all_weights[f"{layer.name}_weight_{i}"] = weight.numpy()
    np.savez(NUMPY_FILE_PATH, **all_weights)
    print(f"Model weights saved as NumPy arrays to '{NUMPY_FILE_PATH}'")
except Exception as e:
    print(f"Error saving model weights as NumPy: {e}")
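# --- Optional: compare PyTorch and Keras outputs on the same random input ---
# A minimal sanity check for the weight transfer above, assuming the PyTorch
# conversion succeeded. The same sample is fed channels-last to Keras and
# channels-first to PyTorch; the reported differences should be near zero.
try:
    sample = np.random.rand(1, 8, 8, NUM_INPUT_PLANES).astype(np.float32)
    keras_policy, keras_value = policy_value_net(sample)
    pytorch_model.eval()
    with torch.no_grad():
        torch_policy, torch_value = pytorch_model(torch.from_numpy(sample).permute(0, 3, 1, 2))
    print("Max policy diff (Keras vs PyTorch):",
          float(np.abs(keras_policy.numpy() - torch_policy.numpy()).max()))
    print("Max value diff (Keras vs PyTorch):",
          float(np.abs(keras_value.numpy() - torch_value.numpy()).max()))
except Exception as e:
    print(f"PyTorch parity check skipped: {e}")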
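# --- Optional: read the custom .bin dump back into NumPy arrays ---
# A minimal sketch; the .bin file written above is a headerless concatenation of
# raw weight bytes, so the shapes must come from the live model and the dtype is
# assumed to be float32 (matching this model's weights).
try:
    with open(BIN_FILE_PATH, 'rb') as f:
        raw = f.read()
    offset = 0
    restored = []
    for layer in policy_value_net.layers:
        for weight in layer.weights:
            shape = tuple(int(d) for d in weight.shape)
            count = int(np.prod(shape))
            arr = np.frombuffer(raw, dtype=np.float32, count=count, offset=offset).reshape(shape)
            offset += arr.nbytes
            restored.append(arr)
    print(f"Restored {len(restored)} weight arrays from '{BIN_FILE_PATH}'")
except Exception as e:
    print(f".bin read-back check skipped: {e}")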
# --- 9. TensorFlow.js (requires command-line tool) ---
# This conversion requires the TensorFlow.js converter tool.
# Command-line example (run in a shell, not in this script):
#   tensorflowjs_converter --input_format=tf_saved_model ./saved_model ./tfjs_model
print("To convert to TensorFlow.js format, run the 'tensorflowjs_converter' command-line tool (see comments in script).")

# --- Zip all files and create download ---
try:
    current_datetime = datetime.datetime.now()
    zip_file_name = f"converted_models-{current_datetime.strftime('%Y%m%d%H%M')}"
    zip_file_path = f"/content/{zip_file_name}"
    shutil.make_archive(zip_file_path, 'zip', OUTPUT_DIR)  # Create zip archive of the output folder
    print(f"All converted model files zipped to '{zip_file_path}.zip'")
    files.download(f"{zip_file_path}.zip")  # Trigger browser download in Colab
    print("Download should start in a moment.")
except Exception as e:
    print(f"Error zipping and creating download: {e}")
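# --- Optional: verify the ONNX export with onnxruntime ---
# A minimal sketch, assuming onnxruntime is installed in the runtime
# (e.g. via `pip install onnxruntime`) and that the ONNX export above succeeded.
try:
    import onnxruntime as ort
    sess = ort.InferenceSession(ONNX_MODEL_PATH)
    sample = np.random.rand(1, 8, 8, NUM_INPUT_PLANES).astype(np.float32)
    ort_outputs = sess.run(None, {sess.get_inputs()[0].name: sample})
    print("ONNX runtime output shapes:", [o.shape for o in ort_outputs])
except Exception as e:
    print(f"ONNX verification skipped: {e}")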