Skip to main content

Documentation Index

Fetch the complete documentation index at: https://docs.cirron.com/llms.txt

Use this file to discover all available pages before exploring further.

Custom Template

The custom template creates a minimal ML project structure that you can use to implement your own models from scratch or integrate with custom ML frameworks. This template provides a clean foundation for building custom ML solutions.

Quick Start

Create a new custom project:
cirron init my-custom-model --template custom
The model type (classification, regression, etc.) is selected interactively during cirron init.

Project Structure

The custom template generates the following project structure:
my-custom-model/
├── src/
│   ├── model.py           # Your custom model implementation
│   ├── data_loader.py     # Data loading utilities
│   ├── train.py           # Training script (you implement)
│   └── inference.py       # Inference/prediction script
├── data/                  # Data directory
├── models/                # Saved models (created during training)
├── requirements.txt       # Python dependencies
├── Dockerfile            # Container configuration
└── cirron.yaml           # Project configuration

Generated Files

requirements.txt

numpy>=1.21.0
pandas>=1.5.0
scikit-learn>=1.3.0
matplotlib>=3.5.0
requests>=2.28.0

src/model.py

A minimal model template that you can customize:
"""
Custom model implementation
Modify this file to implement your specific model architecture
"""

class CustomModel:
    """Skeleton model — replace each method body with your own logic.

    Each unimplemented method raises NotImplementedError so that calling
    it fails loudly instead of silently returning None (a `pass` body
    would make bugs like "predictions are always None" hard to trace).
    """

    def __init__(self):
        # Set up model state (weights, hyperparameters, ...) here.
        pass

    def train(self, X, y):
        """Fit the model on features X and targets y."""
        raise NotImplementedError("Implement training logic in src/model.py")

    def predict(self, X):
        """Return predictions for features X."""
        raise NotImplementedError("Implement prediction logic in src/model.py")

    def save(self, filepath):
        """Persist the trained model to *filepath*."""
        raise NotImplementedError("Implement model saving in src/model.py")

    def load(self, filepath):
        """Restore a trained model from *filepath*."""
        raise NotImplementedError("Implement model loading in src/model.py")

def create_model():
    """Factory function to create model instance."""
    return CustomModel()

src/inference.py

A minimal inference script template you can extend toward production use:
from model import create_model

class ModelInference:
    """Thin wrapper pairing a model instance with trained parameters."""

    def __init__(self, model_path: str = None):
        """Build the model; optionally restore weights from *model_path*."""
        self.model = create_model()

        if model_path:
            self.load_model(model_path)

    def load_model(self, model_path: str):
        """Restore previously trained parameters from disk."""
        self.model.load(model_path)
        print(f"Model loaded from {model_path}")

    def predict(self, input_data):
        """Run the wrapped model on *input_data* and return its output."""
        return self.model.predict(input_data)

if __name__ == "__main__":
    inference = ModelInference()

    # Example usage - modify based on your model
    sample_input = [1, 2, 3, 4, 5]  # Replace with actual input format
    result = inference.predict(sample_input)
    print(f"Prediction: {result}")

Implementation Examples

Example 1: Simple Linear Regression

import numpy as np
import pickle

class LinearRegressionModel:
    """Ordinary least-squares linear regression via the pseudo-inverse."""

    def __init__(self):
        # Both set by train(); weights has one entry per feature.
        self.weights = None
        self.bias = None

    def train(self, X, y):
        """Fit by least squares.

        X: array of shape (n_samples, n_features); y: shape (n_samples,).
        Uses the Moore-Penrose pseudo-inverse rather than explicitly
        inverting X^T X: numerically more stable, and it still returns
        the minimum-norm solution when features are collinear — a case
        where np.linalg.inv would raise LinAlgError.
        """
        # Prepend a column of ones so the bias is learned jointly.
        X_b = np.c_[np.ones((X.shape[0], 1)), X]

        # theta = pinv(X_b) @ y solves min ||X_b theta - y|| directly.
        theta = np.linalg.pinv(X_b).dot(y)

        self.bias = theta[0]
        self.weights = theta[1:]

    def predict(self, X):
        """Return X @ weights + bias."""
        return X.dot(self.weights) + self.bias

    def save(self, filepath):
        """Pickle the learned parameters to *filepath*."""
        with open(filepath, 'wb') as f:
            pickle.dump({'weights': self.weights, 'bias': self.bias}, f)

    def load(self, filepath):
        """Restore parameters previously written by save()."""
        with open(filepath, 'rb') as f:
            data = pickle.load(f)
            self.weights = data['weights']
            self.bias = data['bias']

def create_model():
    """Return a fresh, untrained LinearRegressionModel."""
    return LinearRegressionModel()

Example 2: Custom Neural Network

import numpy as np
import pickle

class SimpleNeuralNetwork:
    """Two-layer feed-forward network trained with vanilla backprop.

    Architecture: input -> sigmoid(hidden) -> sigmoid(output), trained by
    full-batch gradient descent on the squared error.
    """

    def __init__(self, input_size, hidden_size, output_size):
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # Small random weights break symmetry; biases start at zero.
        self.W1 = np.random.randn(input_size, hidden_size) * 0.01
        self.b1 = np.zeros((1, hidden_size))
        self.W2 = np.random.randn(hidden_size, output_size) * 0.01
        self.b2 = np.zeros((1, output_size))

    def sigmoid(self, x):
        """Numerically stable logistic function.

        Clipping the argument prevents np.exp overflow warnings for
        large-magnitude inputs; sigmoid(+/-500) already saturates to
        1/0 within float64 precision, so results are unchanged in the
        normal operating range.
        """
        return 1 / (1 + np.exp(-np.clip(x, -500, 500)))

    def sigmoid_derivative(self, x):
        """Sigmoid derivative, expressed in terms of the sigmoid OUTPUT x."""
        return x * (1 - x)

    def train(self, X, y, learning_rate=0.1, epochs=1000):
        """Train with full-batch gradient descent.

        X: (n_samples, input_size); y: (n_samples, output_size).
        """
        for epoch in range(epochs):
            # Forward pass
            hidden = self.sigmoid(X.dot(self.W1) + self.b1)
            output = self.sigmoid(hidden.dot(self.W2) + self.b2)

            # Backward pass: delta = error * activation derivative.
            output_error = y - output
            output_delta = output_error * self.sigmoid_derivative(output)

            hidden_error = output_delta.dot(self.W2.T)
            hidden_delta = hidden_error * self.sigmoid_derivative(hidden)

            # += (not -=) because output_error = y - output already
            # carries the negative sign of the loss gradient.
            self.W2 += hidden.T.dot(output_delta) * learning_rate
            self.b2 += np.sum(output_delta, axis=0, keepdims=True) * learning_rate
            self.W1 += X.T.dot(hidden_delta) * learning_rate
            self.b1 += np.sum(hidden_delta, axis=0, keepdims=True) * learning_rate

    def predict(self, X):
        """Forward pass only; returns sigmoid outputs in (0, 1)."""
        hidden = self.sigmoid(X.dot(self.W1) + self.b1)
        output = self.sigmoid(hidden.dot(self.W2) + self.b2)
        return output

    def save(self, filepath):
        """Pickle all weight/bias arrays to *filepath*."""
        with open(filepath, 'wb') as f:
            pickle.dump({
                'W1': self.W1, 'b1': self.b1,
                'W2': self.W2, 'b2': self.b2
            }, f)

    def load(self, filepath):
        """Restore weights previously written by save()."""
        with open(filepath, 'rb') as f:
            data = pickle.load(f)
            self.W1 = data['W1']
            self.b1 = data['b1']
            self.W2 = data['W2']
            self.b2 = data['b2']

def create_model():
    """Return a 4-10-1 network matching the template's sample data."""
    return SimpleNeuralNetwork(input_size=4, hidden_size=10, output_size=1)

Example 3: Integration with External Libraries

import lightgbm as lgb
import joblib

class LightGBMModel:
    """Gradient-boosted tree regressor backed by LightGBM."""

    def __init__(self):
        # Booster is created lazily by train().
        self.model = None
        # Default hyperparameters; tune these for your dataset.
        self.params = {
            'objective': 'regression',
            'metric': 'rmse',
            'boosting_type': 'gbdt',
            'num_leaves': 31,
            'learning_rate': 0.05,
            'feature_fraction': 0.9
        }

    def train(self, X, y):
        """Fit a booster on (X, y) for 100 boosting rounds."""
        dataset = lgb.Dataset(X, label=y)
        self.model = lgb.train(self.params, dataset, num_boost_round=100)

    def predict(self, X):
        """Return predictions from the fitted booster."""
        return self.model.predict(X)

    def save(self, filepath):
        """Serialize the booster with joblib."""
        joblib.dump(self.model, filepath)

    def load(self, filepath):
        """Deserialize a booster previously written by save()."""
        self.model = joblib.load(filepath)

def create_model():
    """Return a fresh LightGBMModel."""
    return LightGBMModel()

Training Script Template

Create a src/train.py file for your training logic:
import numpy as np
import os
from model import create_model
from data_loader import load_data

def train_model():
    """Load data, fit the model, and persist it under models/."""
    # Read the training set from disk.
    features, targets = load_data('data/sample_data.csv')

    # Build and fit a fresh model instance.
    model = create_model()
    model.train(features, targets)

    # Ensure the output directory exists before writing the artifact.
    os.makedirs('models', exist_ok=True)
    model.save('models/custom_model.pkl')

    print("Training completed!")

if __name__ == "__main__":
    train_model()

Data Loading Template

Create a src/data_loader.py file for your data loading logic:
import pandas as pd
import numpy as np

def load_data(data_path):
    """Read a CSV file and split it into features and target.

    Assumes the file has a 'target' column; every other column is
    treated as a feature. Returns (X, y) as NumPy arrays.
    """
    frame = pd.read_csv(data_path)

    # Everything except the target column is a feature; column order
    # is preserved.
    target_column = 'target'
    X = frame.loc[:, frame.columns != target_column].values
    y = frame[target_column].values

    return X, y

def create_sample_data(n_samples=1000):
    """Generate a deterministic synthetic regression dataset for testing.

    Returns (X, y): X has shape (n_samples, 4); y is a noisy linear
    combination of the first two features, y = 2*x0 + 1.5*x1 + eps.
    Uses a local Generator so repeated calls are reproducible WITHOUT
    mutating NumPy's global random state (np.random.seed is a process-wide
    side effect that would perturb any other code using np.random).
    """
    rng = np.random.default_rng(42)

    # Four standard-normal features.
    X = rng.standard_normal((n_samples, 4))

    # Linear target with small Gaussian noise.
    y = 2 * X[:, 0] + 1.5 * X[:, 1] + rng.standard_normal(n_samples) * 0.1

    return X, y

Usage Examples

Basic Usage

# Navigate to your project
cd my-custom-model

# Implement your model in src/model.py
# Implement training in src/train.py

# Train your model
python src/train.py

Custom Inference

import numpy as np  # needed for np.array below; the snippet raised NameError without it

from src.inference import ModelInference

# Load trained model
inference = ModelInference('models/custom_model.pkl')

# Make prediction
sample_input = np.array([[1.0, 2.0, 3.0, 4.0]])
result = inference.predict(sample_input)
print(f"Prediction: {result}")

Integration Examples

Integration with PyTorch

import torch
import torch.nn as nn

class PyTorchWrapper:
    """Adapter exposing a small PyTorch MLP through the template's
    train/predict/save/load interface."""

    def __init__(self):
        # 4 -> 10 -> 1 fully connected regression network, Adam + MSE.
        self.model = nn.Sequential(
            nn.Linear(4, 10),
            nn.ReLU(),
            nn.Linear(10, 1)
        )
        self.optimizer = torch.optim.Adam(self.model.parameters())
        self.criterion = nn.MSELoss()

    def train(self, X, y):
        """Fit the network on (X, y) with 100 full-batch Adam steps.

        X: array-like of shape (n_samples, 4); y: array-like of length
        n_samples.
        """
        X_tensor = torch.FloatTensor(X)
        # unsqueeze(1) reshapes the 1-D target to (n, 1), matching the
        # network's output shape so MSELoss compares elementwise.
        y_tensor = torch.FloatTensor(y).unsqueeze(1)

        self.model.train()  # ensure layers are in training mode
        for epoch in range(100):
            self.optimizer.zero_grad()
            outputs = self.model(X_tensor)
            loss = self.criterion(outputs, y_tensor)
            loss.backward()
            self.optimizer.step()

    def predict(self, X):
        """Return predictions as a NumPy array of shape (n_samples, 1)."""
        self.model.eval()  # disable training-only behavior (dropout, etc.)
        X_tensor = torch.FloatTensor(X)
        with torch.no_grad():
            predictions = self.model(X_tensor)
        return predictions.numpy()

    def save(self, filepath):
        """Save only the network's parameters (state_dict), not the object."""
        torch.save(self.model.state_dict(), filepath)

    def load(self, filepath):
        """Load parameters written by save().

        weights_only=True restricts deserialization to plain tensors,
        guarding against arbitrary-code execution when loading an
        untrusted checkpoint file (torch.load uses pickle underneath).
        """
        self.model.load_state_dict(torch.load(filepath, weights_only=True))

def create_model():
    """Return a fresh PyTorchWrapper."""
    return PyTorchWrapper()

Integration with TensorFlow

import tensorflow as tf

class TensorFlowWrapper:
    """Adapter exposing a small Keras network through the template's
    train/predict/save/load interface."""

    def __init__(self):
        # 4 -> 10 -> 1 dense regression network, trained with Adam + MSE.
        layers = [
            tf.keras.layers.Dense(10, activation='relu', input_shape=(4,)),
            tf.keras.layers.Dense(1)
        ]
        self.model = tf.keras.Sequential(layers)
        self.model.compile(optimizer='adam', loss='mse')

    def train(self, X, y):
        """Fit the network for 100 epochs without progress output."""
        self.model.fit(X, y, epochs=100, verbose=0)

    def predict(self, X):
        """Return the network's predictions for X."""
        return self.model.predict(X)

    def save(self, filepath):
        """Persist the full model (architecture, weights, optimizer state)."""
        self.model.save(filepath)

    def load(self, filepath):
        """Replace the current model with one loaded from disk."""
        self.model = tf.keras.models.load_model(filepath)

def create_model():
    """Return a fresh TensorFlowWrapper."""
    return TensorFlowWrapper()

Best Practices

Model Interface

  • Implement consistent train(), predict(), save(), and load() methods
  • Handle different input formats (numpy arrays, pandas DataFrames, etc.)
  • Provide clear error messages for invalid inputs

Data Handling

  • Implement proper data validation
  • Handle missing values and outliers
  • Scale features appropriately
  • Split data into train/validation/test sets

Model Persistence

  • Use appropriate serialization format (pickle, joblib, JSON, etc.)
  • Include model metadata (version, parameters, etc.)
  • Ensure cross-platform compatibility

Performance

  • Optimize for your specific use case
  • Use appropriate data structures
  • Consider parallel processing for large datasets
  • Profile your code for bottlenecks

Customization Options

Adding Dependencies

Update requirements.txt to include your custom dependencies:
numpy>=1.21.0
pandas>=1.5.0
scikit-learn>=1.3.0
matplotlib>=3.5.0
requests>=2.28.0
lightgbm>=3.3.0
xgboost>=1.6.0
catboost>=1.1.0

Custom Configuration

Add configuration options to your model:
class CustomModel:
    def __init__(self, config=None):
        """Initialize the model with an optional configuration mapping."""
        # Treat only None as "no config": the truthiness test
        # (`config or {}`) would silently discard an explicitly passed
        # empty dict that the caller may hold a reference to.
        self.config = config if config is not None else {}
        # Initialize based on config

Advanced Features

  • Add model validation and testing
  • Implement model versioning
  • Add logging and monitoring
  • Create model explainability features

Next Steps