TensorFlow Template
The TensorFlow template creates a production-ready ML project with TensorFlow, Keras, and common ML dependencies. It is well suited for deep learning work such as image classification, computer vision, and general neural network development.
Quick Start
Create a new TensorFlow project:
cirron init my-tensorflow-model --template tensorflow
The model type (classification, regression, etc.) is selected interactively during cirron init.
Project Structure
The TensorFlow template generates the following project structure:
my-tensorflow-model/
├── src/
│   ├── model.py          # Model architecture definition
│   ├── data_loader.py    # Data loading utilities
│   ├── train.py          # Training script
│   └── inference.py      # Inference/prediction script
├── data/                 # Data directory
├── checkpoints/          # Model checkpoints (created during training)
├── models/               # Saved models (created during training)
├── requirements.txt      # Python dependencies
├── Dockerfile            # Container configuration
└── cirron.yaml           # Project configuration
Generated Files
requirements.txt
tensorflow>=2.12.0
numpy>=1.21.0
scikit-learn>=1.3.0
matplotlib>=3.5.0
Pillow>=9.0.0
requests>=2.28.0
src/model.py
The model architecture is automatically generated based on your model type:
Classification Model
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

def create_model(num_classes=10):
    """Create a CNN model for classification"""
    model = keras.Sequential([
        # Convolutional layers
        layers.Conv2D(32, 3, padding='same', activation='relu', input_shape=(224, 224, 3)),
        layers.MaxPooling2D(2, 2),
        layers.Conv2D(64, 3, padding='same', activation='relu'),
        layers.MaxPooling2D(2, 2),
        layers.Conv2D(128, 3, padding='same', activation='relu'),
        layers.MaxPooling2D(2, 2),

        # Flatten and dense layers
        layers.Flatten(),
        layers.Dropout(0.25),
        layers.Dense(512, activation='relu'),
        layers.Dropout(0.25),
        layers.Dense(num_classes, activation='softmax')
    ])
    return model
Regression Model
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

def create_model(input_shape=(784,)):
    """Create a neural network for regression"""
    model = keras.Sequential([
        # Input layer
        layers.Input(shape=input_shape),

        # Hidden layers
        layers.Dense(512, activation='relu'),
        layers.Dropout(0.2),
        layers.Dense(256, activation='relu'),
        layers.Dropout(0.2),
        layers.Dense(128, activation='relu'),
        layers.Dropout(0.2),

        # Output layer
        layers.Dense(1, activation='linear')
    ])
    return model
src/data_loader.py
Data loading utilities for TensorFlow:
import tensorflow as tf
import numpy as np
import os
from PIL import Image

def load_and_preprocess_image(file_path, target_size=(224, 224)):
    """Load and preprocess an image"""
    img = tf.io.read_file(file_path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, target_size)
    img = tf.cast(img, tf.float32) / 255.0
    return img

def create_dataset(data_path, batch_size=32, shuffle=True):
    """Create TensorFlow dataset from directory"""
    # TODO: Implement based on your data structure
    # This is a placeholder - customize for your data

    # Example for image classification
    dataset = tf.data.Dataset.list_files(os.path.join(data_path, "*.jpg"))
    dataset = dataset.map(load_and_preprocess_image)
    if shuffle:
        dataset = dataset.shuffle(buffer_size=1000)
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    return dataset

def get_data_loaders(config):
    """Get training and validation datasets"""
    train_dataset = create_dataset(
        os.path.join(config['data_path'], 'train'),
        batch_size=config['batch_size'],
        shuffle=True
    )
    val_dataset = create_dataset(
        os.path.join(config['data_path'], 'val'),
        batch_size=config['batch_size'],
        shuffle=False
    )
    return train_dataset, val_dataset
src/train.py
Complete training script with TensorFlow/Keras best practices:
import tensorflow as tf
import numpy as np
import os

from model import create_model
from data_loader import get_data_loaders

class Trainer:
    def __init__(self, config):
        self.config = config
        self.model = create_model()

        # Compile model
        self.compile_model()

        # Data loaders
        self.train_dataset, self.val_dataset = get_data_loaders(config)

        # Callbacks
        self.callbacks = self.get_callbacks()

    def compile_model(self):
        """Compile the model with appropriate loss and metrics"""
        if self.config['model_type'] == 'classification':
            loss = 'sparse_categorical_crossentropy'
            metrics = ['accuracy']
        elif self.config['model_type'] == 'regression':
            loss = 'mse'
            metrics = ['mae']
        else:
            loss = 'mse'
            metrics = ['mae']

        self.model.compile(
            optimizer=tf.keras.optimizers.Adam(learning_rate=self.config['learning_rate']),
            loss=loss,
            metrics=metrics
        )

    def get_callbacks(self):
        """Setup training callbacks"""
        callbacks = []

        # Model checkpoint
        os.makedirs('checkpoints', exist_ok=True)
        checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath='checkpoints/best_model.h5',
            save_best_only=True,
            monitor='val_loss',
            mode='min'
        )
        callbacks.append(checkpoint_callback)

        # Early stopping
        early_stop_callback = tf.keras.callbacks.EarlyStopping(
            monitor='val_loss',
            patience=5,
            restore_best_weights=True
        )
        callbacks.append(early_stop_callback)

        return callbacks

    def train(self):
        """Train the model"""
        history = self.model.fit(
            self.train_dataset,
            validation_data=self.val_dataset,
            epochs=self.config['num_epochs'],
            callbacks=self.callbacks,
            verbose=1
        )
        return history

if __name__ == "__main__":
    config = {
        'batch_size': 32,
        'learning_rate': 0.001,
        'num_epochs': 10,
        'model_type': 'classification',  # or 'regression'
        'data_path': 'data/',
    }

    trainer = Trainer(config)
    history = trainer.train()

    # Save final model
    os.makedirs('models', exist_ok=True)
    trainer.model.save('models/final_model.h5')
    print("Training completed!")
src/inference.py
Production-ready inference script:
import tensorflow as tf
import numpy as np
from PIL import Image

from model import create_model

class ModelInference:
    def __init__(self, model_path: str = None):
        self.model = create_model()
        if model_path:
            self.load_model(model_path)

    def load_model(self, model_path: str):
        """Load trained model weights"""
        self.model.load_weights(model_path)
        print(f"Model loaded from {model_path}")

    def preprocess(self, input_data):
        """Preprocess input data"""
        if isinstance(input_data, Image.Image):
            input_data = input_data.resize((224, 224))
            input_array = np.array(input_data) / 255.0
            input_array = np.expand_dims(input_array, axis=0)
        else:
            input_array = np.array(input_data)
            # Add a batch dimension to unbatched inputs: a 1-D feature
            # vector or a single H x W x C image
            if input_array.ndim in (1, 3):
                input_array = np.expand_dims(input_array, axis=0)
        return input_array.astype(np.float32)

    def predict(self, input_data):
        """Make prediction"""
        input_tensor = self.preprocess(input_data)
        predictions = self.model.predict(input_tensor)
        return predictions

if __name__ == "__main__":
    inference = ModelInference()

    # Example usage with random input in [0, 1); a real run would pass
    # a trained model path to ModelInference
    sample_input = np.random.rand(224, 224, 3)
    result = inference.predict(sample_input)
    print(f"Prediction: {result}")
Model Types
The TensorFlow template supports different model types:
Classification
- Use case: Image classification, text classification, multi-class problems
- Output: Class probabilities
- Loss function: sparse_categorical_crossentropy
- Activation: Softmax
- Metrics: Accuracy
Regression
- Use case: Price prediction, time series forecasting, continuous value prediction
- Output: Continuous values
- Loss function: MSE (Mean Squared Error)
- Activation: Linear
- Metrics: MAE (Mean Absolute Error)
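The two types differ only in the output layer and compile settings. A minimal sketch, assuming whichever create_model variant from src/model.py your project generated is in scope:
from model import create_model  # the variant generated for your project

# Classification projects: softmax output, cross-entropy loss, accuracy
model = create_model(num_classes=10)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Regression projects: linear output, MSE loss, MAE
# model = create_model(input_shape=(784,))
# model.compile(optimizer='adam', loss='mse', metrics=['mae'])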
Training Configuration
Default training configuration:
config = {
    'batch_size': 32,
    'learning_rate': 0.001,
    'num_epochs': 10,
    'model_type': 'classification',  # or 'regression'
    'data_path': 'data/',
}
Usage Examples
Basic Training
# Navigate to your project
cd my-tensorflow-model
# Train the model
python src/train.py
Custom Training
# Modify config in src/train.py
config = {
    'batch_size': 64,
    'learning_rate': 0.0001,
    'num_epochs': 50,
    'model_type': 'classification',
    'data_path': 'data/',
}
Inference
import sys
sys.path.append('src')  # the template's modules import each other without the src. prefix

from inference import ModelInference

# Load trained model
inference = ModelInference('checkpoints/best_model.h5')

# Make prediction
result = inference.predict(your_input_data)
print(f"Prediction: {result}")
TensorFlow/Keras Features
Built-in Callbacks
The training script includes essential callbacks:
- ModelCheckpoint: Saves the best model based on validation loss
- EarlyStopping: Stops training when validation loss stops improving
Data Pipeline
- tf.data.Dataset: Efficient data loading and preprocessing
- Prefetching: Optimizes data pipeline performance
- Shuffling: Ensures proper training randomization
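These stages compose in the order map, shuffle, batch, prefetch. A minimal sketch using load_and_preprocess_image from src/data_loader.py; num_parallel_calls is an optional tweak not present in the generated template:
import tensorflow as tf
from data_loader import load_and_preprocess_image

dataset = tf.data.Dataset.list_files('data/train/*.jpg')
dataset = dataset.map(load_and_preprocess_image,
                      num_parallel_calls=tf.data.AUTOTUNE)  # parallel decoding
dataset = dataset.shuffle(buffer_size=1000)                 # randomize order each epoch
dataset = dataset.batch(32)
dataset = dataset.prefetch(tf.data.AUTOTUNE)                # overlap loading with training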
Model Compilation
- Automatic loss selection: Based on model type
- Appropriate metrics: Accuracy for classification, MAE for regression
- Adam optimizer: Default optimizer with configurable learning rate
Customization
Adding Custom Models
- Modify src/model.py to add your custom architecture
- Update the model creation function
- Adjust the data preprocessing in src/data_loader.py
Custom Data Loading
- Update the create_dataset function in src/data_loader.py (see the sketch below)
- Implement data loading logic for your specific data format
- Adjust preprocessing steps as needed
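For the class-per-subdirectory layout shown under Best Practices below, one way to replace the placeholder is Keras's built-in directory loader. A sketch, assuming JPEG images in class subfolders:
import tensorflow as tf

def create_dataset(data_path, batch_size=32, shuffle=True):
    """Load labeled images from class subdirectories"""
    dataset = tf.keras.utils.image_dataset_from_directory(
        data_path,
        image_size=(224, 224),
        batch_size=batch_size,
        shuffle=shuffle
    )
    # Scale pixels to [0, 1] to match the template's preprocessing
    dataset = dataset.map(lambda x, y: (x / 255.0, y))
    return dataset.prefetch(tf.data.AUTOTUNE)
The integer labels this produces match the sparse_categorical_crossentropy loss used for classification.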
Hyperparameter Tuning
Modify the config dictionary in src/train.py:
config = {
    'batch_size': 64,          # Larger batches for more stable training
    'learning_rate': 0.0001,   # Lower learning rate for fine-tuning
    'num_epochs': 100,         # More epochs for better convergence
    'model_type': 'classification',
    'data_path': 'data/',
}
Adding Custom Callbacks
def get_callbacks(self):
    callbacks = []
    # Existing callbacks...

    # Add custom callback
    custom_callback = tf.keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss',
        factor=0.5,
        patience=3,
        min_lr=1e-7
    )
    callbacks.append(custom_callback)
    return callbacks
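Any other Keras callback can be appended the same way, for example TensorBoard logging (the log directory name here is arbitrary):
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir='logs')
callbacks.append(tensorboard_callback)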
Best Practices
Data Organization
data/
├── train/
│   ├── class1/
│   ├── class2/
│   └── ...
└── val/
    ├── class1/
    ├── class2/
    └── ...
Model Saving
- Best model is saved as checkpoints/best_model.h5
- Final model is saved as models/final_model.h5
- Models are saved in HDF5 format for compatibility
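Since ModelCheckpoint saves the full model by default (not just weights), either file can be restored with the standard Keras loader:
import tensorflow as tf

model = tf.keras.models.load_model('models/final_model.h5')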
GPU Support
- TensorFlow automatically detects and uses an available GPU; no template changes are required
- Mixed precision training can speed up training on supported GPUs (see Mixed Precision Training below)
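A quick check before a long run confirms what TensorFlow sees:
import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')
print(f"GPUs available: {len(gpus)}")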
Memory Management
- Uses tf.data.AUTOTUNE for optimal performance
- Implements prefetching to reduce I/O bottlenecks
- Efficient batch processing with configurable batch sizes
Data Pipeline Optimization
# Optimize data loading: cache first so later epochs skip decoding,
# then prefetch so training never waits on input
dataset = dataset.cache()     # Cache data in memory if it fits
dataset = dataset.prefetch(tf.data.AUTOTUNE)
Mixed Precision Training
# Enable mixed precision for faster training
policy = tf.keras.mixed_precision.Policy('mixed_float16')
tf.keras.mixed_precision.set_global_policy(policy)
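One caveat: with this policy active, Keras recommends keeping the model's final layer in float32 for numeric stability, which is a one-argument change in create_model:
layers.Dense(num_classes, activation='softmax', dtype='float32')  # outputs stay float32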
Model Optimization
# Optimize model for inference by converting to TensorFlow Lite
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
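The conversion result is a bytes object, so persisting it is a plain file write:
with open('models/model.tflite', 'wb') as f:
    f.write(tflite_model)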
Next Steps