Code Generation Usage Guide

Overview

This guide covers how to use Circuitry-generated code in your projects across all 13 supported programming languages. The generator produces standalone, reusable functions that can be integrated into existing applications with no dependency on the Circuitry platform.

Quick Start

1. Generate Code

  1. Open your workflow in Circuitry
  2. Click "Generate Code" button
  3. Select your target language
  4. Enable "Include Helper Libraries" (recommended)
  5. Click "Generate" to create the code
  6. Download both the generated code and helper libraries

2. Setup Environment

Each language requires specific setup steps and dependencies.


JavaScript

Installation

npm install  # If using helper libraries with package.json

Environment Variables

export OPENAI_API_KEY=your_openai_api_key_here

Usage Example

import { httpRequest, callAI } from './circuitry-helpers.js';
import { showMessage } from './circuitry-ui.js';

// Generated function (example)
async function myWorkflow(input = {}) {
  let data = input;
  
  // HTTP request (the node's response becomes the current data)
  data = await httpRequest('https://api.example.com/data');
  
  // AI processing
  const analysis = await callAI('Analyze this data: ' + JSON.stringify(data));
  data.analysis = analysis.response;
  
  // Code node - inline JavaScript
  data.processed = data.items.map(item => ({
    ...item,
    processed: true,
    timestamp: new Date().toISOString()
  }));
  
  // Show result
  await showMessage('Processing complete', 'success');
  
  return data;
}

// Use in your application
const result = await myWorkflow({ items: [...] });
console.log(result);

Python

Installation

pip install -r requirements.txt
# For Code nodes, requirements.txt includes:
# PyExecJS>=1.5.1  # JavaScript execution

Environment Variables

export OPENAI_API_KEY=your_openai_api_key_here

Usage Example

import asyncio
from typing import Optional

from circuitry import http_request, call_ai, execute_javascript
from circuitry_ui import show_message, confirm_action

# Generated function (example)
async def my_workflow(input_data: Optional[dict] = None) -> dict:
    data = dict(input_data or {})  # avoid the mutable-default-argument pitfall
    
    # HTTP request (the node's response becomes the current data)
    data = await http_request('https://api.example.com/data', method='GET')
    
    # User confirmation
    confirmation = await confirm_action(f"Process {len(data.get('items', []))} items?")
    if not confirmation['confirmed']:
        return {'cancelled': True}
    
    # AI processing
    analysis = await call_ai(f'Analyze this data: {data}', model='gpt-4o-mini')
    data['analysis'] = analysis['response']
    
    # Code node - execute JavaScript
    js_code = """
    data.processed = data.items.map(item => ({
        ...item,
        processed: true,
        timestamp: new Date().toISOString(),
        score: Math.random() * 100
    }));
    
    data.summary = {
        total: data.items.length,
        processed: data.processed.length,
        avgScore: data.processed.reduce((sum, item) => sum + item.score, 0) / data.processed.length
    };
    """
    data = await execute_javascript(js_code, data)
    
    # Show completion message
    await show_message(f"Processed {data['summary']['total']} items successfully", 'success')
    
    return data

# Use in your application
if __name__ == '__main__':
    result = asyncio.run(my_workflow({'items': [...]}))
    print(result)

Go

Installation

go mod tidy
# For Code nodes:
go get github.com/dop251/goja  # JavaScript engine

Environment Variables

export OPENAI_API_KEY=your_openai_api_key_here

Usage Example

package main

import (
    "encoding/json"
    "fmt"
    "log"
    "sync"
    
    "github.com/your-org/workflow/helpers"
    "github.com/your-org/workflow/ui"
)

// Generated function with parallel execution (example)
func myWorkflow(input map[string]interface{}) (map[string]interface{}, error) {
    data := input
    if data == nil {
        data = make(map[string]interface{})
    }
    
    // Initial HTTP request
    result, err := helpers.HTTPRequest("https://api.example.com/data", nil)
    if err != nil {
        return nil, err
    }
    data = result
    
    // Fork node - parallel execution
    var wg sync.WaitGroup
    var branch1Result, branch2Result map[string]interface{}
    var err1, err2 error
    
    wg.Add(2)
    
    // Branch 1: AI analysis
    go func() {
        defer wg.Done()
        analysis, err := helpers.CallAI("Analyze items", "gpt-4o-mini", map[string]interface{}{})
        if err != nil {
            err1 = err
            return
        }
        branch1Result = map[string]interface{}{"analysis": analysis}
    }()
    
    // Branch 2: Data processing with JavaScript
    go func() {
        defer wg.Done()
        jsCode := `
        data.processed = data.items.map(item => ({
            ...item,
            processed: true,
            timestamp: new Date().toISOString()
        }));
        `
        result, err := helpers.ExecuteJavaScript(jsCode, data)
        if err != nil {
            err2 = err
            return
        }
        branch2Result = result
    }()
    
    wg.Wait()
    
    // Check for errors
    if err1 != nil {
        return nil, err1
    }
    if err2 != nil {
        return nil, err2
    }
    
    // Join results
    for key, value := range branch1Result {
        data[key] = value
    }
    for key, value := range branch2Result {
        data[key] = value
    }
    
    // Show completion
    ui.ShowMessage("Workflow completed successfully", "success")
    
    return data, nil
}

func main() {
    input := map[string]interface{}{
        "items": []interface{}{
            map[string]interface{}{"id": "1", "name": "Item 1"},
            map[string]interface{}{"id": "2", "name": "Item 2"},
        },
    }
    
    result, err := myWorkflow(input)
    if err != nil {
        log.Fatal(err)
    }
    
    // Pretty print result
    jsonResult, _ := json.MarshalIndent(result, "", "  ")
    fmt.Printf("Result:\n%s\n", jsonResult)
}

All Supported Languages

Circuitry supports code generation for 13 programming languages:

Language   | Parallel Execution        | Code Nodes                  | Helper Libraries
-----------|---------------------------|-----------------------------|-----------------
JavaScript | Promise.all()             | ✅ Inline                   | ✅ Full
TypeScript | Promise.all()             | ✅ Inline                   | ✅ Full
Python     | asyncio.gather()          | ✅ via execute_javascript() | ✅ Full
Go         | Goroutines + WaitGroup    | ✅ via ExecuteJavaScript()  | ✅ Full
Java       | CompletableFuture.allOf() | ✅ via executeJavaScript()  | ✅ Full
Rust       | tokio::join!              | ✅ via execute_javascript() | ✅ Full
C          | pthread (optional)        | ✅ via execute_javascript() | ✅ Full
C++        | std::async                | ✅ via executeJavaScript()  | ✅ Full
C#         | Task.WhenAll              | ✅ via ExecuteJavaScript()  | ✅ Full
PHP        | Sequential                | ✅ via executeJavaScript()  | ✅ Full
Ruby       | concurrent-ruby           | ✅ via execute_javascript() | ✅ Full
Kotlin     | Coroutines                | ✅ via executeJavaScript()  | ✅ Full
Swift      | async let                 | ✅ via executeJavaScript()  | ✅ Full
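
The table lists each language's fork/join mechanism. As a hedged sketch of what the generated JavaScript looks like for a fork/join pair (the branch contents are illustrative; callAI is the helper imported in the earlier example):

import { callAI } from './circuitry-helpers.js';

let data = { items: ['a', 'b'] };

// Fork node - both branches start immediately and run concurrently
const [analysis, processed] = await Promise.all([
  callAI('Analyze these items: ' + JSON.stringify(data.items)),          // Branch 1: AI call
  Promise.resolve(data.items.map(item => ({ item, processed: true }))),  // Branch 2: local transform
]);

// Join node - merge both branch results back into the workflow data
data = { ...data, analysis: analysis.response, processed };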

Key Features

Code Node Support

  • JavaScript/TypeScript: Code executes directly inline
  • All Other Languages: Use the executeJavaScript() helper (named per each language's conventions, e.g. execute_javascript() in Python)
  • Shared Context: JavaScript code has access to the workflow data
  • Return Values: Modified data flows to the next node (see the sketch after this list)
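
To make that contract concrete, here is a minimal sketch in JavaScript of what a Code-node helper might do under the hood; runCodeNode is a hypothetical name, and the real per-language helpers differ in detail:

// Hypothetical sketch: wrap the snippet in a function that receives the
// workflow data and returns the (possibly modified) data.
function runCodeNode(snippet, data) {
  const fn = new Function('data', `${snippet}\nreturn data;`);
  return fn(JSON.parse(JSON.stringify(data))); // deep copy so the caller's object is untouched
}

// The same kind of snippet a Python workflow would hand to execute_javascript()
const out = runCodeNode('data.doubled = data.items.map(x => x * 2);', { items: [1, 2, 3] });
console.log(out.doubled); // [2, 4, 6]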

Helper Libraries

  • HTTP Requests: Full-featured HTTP clients with authentication (see the sketch after this list)
  • AI Integration: Direct OpenAI API calls with error handling
  • UI Interactions: User confirmations, messages, forms, file uploads
  • Utilities: JSON parsing, template variables, retry logic, logging
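
For example, a hedged sketch combining the HTTP and AI helpers; the options shape passed to httpRequest is an assumption, so verify it against the comments in your downloaded circuitry-helpers file:

import { httpRequest, callAI } from './circuitry-helpers.js';

// Authenticated request - header and option names are assumptions
const data = await httpRequest('https://api.example.com/data', {
  method: 'GET',
  headers: { Authorization: `Bearer ${process.env.API_TOKEN}` },
});

const analysis = await callAI('Summarize: ' + JSON.stringify(data));
console.log(analysis.response);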

Installation Requirements

  • Node.js: Required for Code node support in non-JavaScript languages (verify with the check after this list)
  • API Keys: OpenAI API key for AI/Agent nodes
  • Language-Specific: Package managers and dependencies per language
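
A quick sanity check before running generated code (version floors come from the per-language requirements below):

node --version                        # Node.js 16+; also needed for Code nodes outside JavaScript
echo "${OPENAI_API_KEY:+key is set}"  # prints "key is set" without echoing the key itself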

Environment Setup

Universal Environment Variables

export OPENAI_API_KEY=your_openai_api_key_here
export ANTHROPIC_API_KEY=your_anthropic_api_key_here  # Optional
export CIRCUITRY_API_TIMEOUT=30000
export CIRCUITRY_LOG_LEVEL=info

Language-Specific Requirements

JavaScript/TypeScript:

  • Node.js 16+ or modern browser
  • Package manager: npm/yarn/pnpm

Python:

  • Python 3.9+
  • Package manager: pip
  • PyExecJS for Code nodes

Go:

  • Go 1.19+
  • Node.js for Code nodes
  • Modules: go mod tidy

Java:

  • Java 11+
  • GraalVM JS for Code nodes
  • Build tool: Maven/Gradle

Rust:

  • Rust 1.70+ with Tokio
  • Boa engine for Code nodes
  • Cargo for dependencies

And more... Each language's helper library includes complete setup instructions in its comments.

Best Practices

1. Error Handling

Always wrap generated functions in try-catch blocks:

try {
  const result = await myWorkflow(input);
  console.log('Success:', result);
} catch (error) {
  console.error('Workflow failed:', error);
}

2. Input Validation

Validate inputs before calling generated functions:

function validateInput(input) {
  if (!input || typeof input !== 'object') {
    throw new Error('Invalid input: must be an object');
  }
  // Add specific validation for your workflow
}

3. Environment Configuration

Use environment files for configuration:

OPENAI_API_KEY=your_key_here
CIRCUITRY_LOG_LEVEL=info
CIRCUITRY_DEBUG=false
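
One common way to load such a file is the dotenv package (an assumption; any env loader works):

import 'dotenv/config'; // npm install dotenv; populates process.env from .env

console.log(process.env.CIRCUITRY_LOG_LEVEL); // "info"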

4. Testing

Generated functions can be unit tested:

// Jest example
test('myWorkflow processes data correctly', async () => {
  const input = { items: [{ id: '1', name: 'Test' }] };
  const result = await myWorkflow(input);
  
  expect(result.processed).toBeDefined();
  expect(result.processed[0].processed).toBe(true);
});

Troubleshooting

Common Issues

1. Missing API Keys

Error: OpenAI API key not found
Solution: Set OPENAI_API_KEY environment variable

2. Node.js Missing (for Code nodes)

Error: Failed to execute JavaScript
Solution: Install Node.js for Code node support

3. Network Issues

Error: HTTP request failed
Solution: Check URLs, authentication, and network connectivity

4. Dependency Errors

Error: Module not found
Solution: Install required dependencies using language package manager

Debug Mode

Enable verbose logging:

export CIRCUITRY_DEBUG=true
export CIRCUITRY_LOG_LEVEL=debug

This shows detailed information about HTTP requests, AI calls, and Code node execution.

Integration Examples

Web Applications

  • React/Vue/Angular: Import generated functions as modules
  • Express/FastAPI: Use as route handlers (see the Express sketch after this list)
  • Serverless: Deploy as AWS Lambda, Vercel, or Netlify functions
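
As a hedged sketch, exposing the generated myWorkflow function from earlier as an Express route (the module path is hypothetical):

import express from 'express';
import { myWorkflow } from './my-workflow.js'; // hypothetical path to your generated code

const app = express();
app.use(express.json()); // parse JSON request bodies

app.post('/run-workflow', async (req, res) => {
  try {
    const result = await myWorkflow(req.body); // request body becomes workflow input
    res.json(result);
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
});

app.listen(3000);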

Desktop Applications

  • Electron: Include in main or renderer process
  • Flutter/React Native: Use appropriate language bindings
  • Native Apps: Integrate with language-specific frameworks

CLI Tools

  • Node.js: Create command-line interfaces (see the sketch after this list)
  • Python: Use argparse for CLI arguments
  • Go: Build standalone executables
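
A minimal Node.js CLI wrapper around the generated function (the module path and argument format are illustrative):

#!/usr/bin/env node
import { myWorkflow } from './my-workflow.js'; // hypothetical path to your generated code

const input = JSON.parse(process.argv[2] ?? '{}'); // e.g. node cli.js '{"items":[1,2]}'

myWorkflow(input)
  .then(result => console.log(JSON.stringify(result, null, 2)))
  .catch(error => {
    console.error('Workflow failed:', error);
    process.exit(1);
  });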

Microservices

  • Docker: Container-ready with dependencies
  • Kubernetes: Deploy as services
  • API Gateway: Expose as REST endpoints

Performance Tips

  1. Parallel Execution: Fork/join branches run truly in parallel in languages with native concurrency; PHP executes branches sequentially (see the table above)
  2. Connection Pooling: Helper libraries reuse HTTP connections
  3. Caching: Cache AI responses and API results where appropriate (see the sketch after this list)
  4. Error Recovery: Built-in retry logic with exponential backoff
  5. Resource Cleanup: Proper memory management in system languages
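
For tip 3, a minimal JavaScript sketch of caching AI responses; cachedCallAI is a hypothetical wrapper keyed by prompt:

import { callAI } from './circuitry-helpers.js';

const aiCache = new Map();

function cachedCallAI(prompt) {
  if (!aiCache.has(prompt)) {
    // Store the promise itself so concurrent callers share one API call
    aiCache.set(prompt, callAI(prompt));
  }
  return aiCache.get(prompt);
}

// The second call with the same prompt resolves from the cache
const first = await cachedCallAI('Classify: apple');
const second = await cachedCallAI('Classify: apple');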

Next Steps