Docs/Node.js Guide
Node.js / JavaScript

Node.js Integration Guide

Add AI safety guardrails to your Node.js applications. This guide covers fetch, axios, middleware patterns, and Express integration.

Prerequisites

  • Node.js 18+ (for native fetch) or Node.js 14+ with axios
  • A Tork API key (available from your Tork dashboard)
  • Basic knowledge of async/await

Environment Setup

Store your API key securely using environment variables.

.env
# .env
TORK_API_KEY=tork_live_xxxxxxxxxxxxxxxxxxxxxxxxxxxx

Never commit your API key to version control. Add .env to your .gitignore.

Basic Usage

Make your first API call using fetch or axios.

javascript
// Using native fetch (Node.js 18+)
// POST the user's message to the evaluate endpoint; the API key is read
// from the environment (see "Environment Setup" above).
const res = await fetch('https://tork.network/api/v1/evaluate', {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${process.env.TORK_API_KEY}`
  },
  body: JSON.stringify({
    content: userMessage, // the text to screen (defined by your app)
    model: 'gpt-4',
    context: 'chatbot'
  })
});

// Parse the evaluation verdict and act on it.
const result = await res.json();

if (!result.safe) {
  console.log('Unsafe content detected:', result.flags);
}

Reusable Client

Create a wrapper function for cleaner code across your application.

tork.js
// tork.js - Reusable Tork client
const TORK_API_URL = 'https://tork.network/api/v1';

/**
 * Sends content to the Tork evaluate endpoint and returns the parsed result.
 *
 * @param {string} content - Text to evaluate.
 * @param {object} [options] - Optional overrides.
 * @param {string} [options.model='gpt-4'] - Target model name.
 * @param {string} [options.context='general'] - Evaluation context hint.
 * @returns {Promise<object>} Parsed JSON evaluation from the API.
 * @throws {Error} When the API responds with a non-2xx status.
 */
export async function evaluateContent(content, options = {}) {
  const { model = 'gpt-4', context = 'general' } = options;

  const payload = JSON.stringify({ content, model, context });

  const apiResponse = await fetch(`${TORK_API_URL}/evaluate`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${process.env.TORK_API_KEY}`
    },
    body: payload
  });

  // Surface HTTP failures as exceptions rather than returning error bodies.
  if (!apiResponse.ok) {
    throw new Error(`Tork API error: ${apiResponse.status}`);
  }

  return apiResponse.json();
}

// Usage
const result = await evaluateContent(userInput, { context: 'chatbot' });

Error Handling

Handle API errors, rate limits, and network issues gracefully.

javascript
/**
 * Evaluates content through the Tork API with structured error handling.
 *
 * BUG FIX: the original catch block handled `AbortError`, but no AbortSignal
 * was ever passed to fetch, so the "Request timed out" branch was unreachable
 * dead code. An optional `timeoutMs` now wires up an AbortController, making
 * the timeout path real while keeping the old call signature backward
 * compatible (omitting it preserves the original no-timeout behavior).
 *
 * @param {string} content - Text to evaluate.
 * @param {object} [options]
 * @param {number} [options.timeoutMs] - Abort the request after this many ms.
 * @returns {Promise<object>} Parsed evaluation result.
 * @throws {Error} 'Invalid API key' (401), 'Rate limit exceeded…' (429),
 *   'Request timed out' (abort), or a generic API/network error.
 */
async function safeEvaluate(content, { timeoutMs } = {}) {
  const controller = timeoutMs ? new AbortController() : null;
  const timer = controller ? setTimeout(() => controller.abort(), timeoutMs) : null;

  try {
    const response = await fetch('https://tork.network/api/v1/evaluate', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${process.env.TORK_API_KEY}`
      },
      body: JSON.stringify({ content, model: 'gpt-4' }),
      signal: controller?.signal
    });

    // Map well-known HTTP statuses to actionable messages.
    if (!response.ok) {
      if (response.status === 401) {
        throw new Error('Invalid API key');
      }
      if (response.status === 429) {
        throw new Error('Rate limit exceeded. Please retry later.');
      }
      throw new Error(`API error: ${response.status}`);
    }

    return await response.json();

  } catch (error) {
    // Network errors, timeouts, etc.
    if (error.name === 'AbortError') {
      throw new Error('Request timed out');
    }
    throw error;
  } finally {
    // Always clear the pending abort timer so the process can exit cleanly.
    if (timer !== null) clearTimeout(timer);
  }
}

// With timeout
// Abort the request if it takes longer than 5 seconds; always clear the
// timer afterwards so it can't fire against a completed request.
const abortCtl = new AbortController();
const abortTimer = setTimeout(() => abortCtl.abort(), 5000);

try {
  const response = await fetch(url, { signal: abortCtl.signal });
  // ...
} finally {
  clearTimeout(abortTimer);
}

Common Error Codes

401 — Invalid or missing API key
429 — Rate limit exceeded (100 req/min)
400 — Invalid request body
500 — Server error (retry with backoff)

Express Middleware

Protect your API endpoints with a reusable middleware.

middleware/torkGuard.js
// middleware/torkGuard.js
/**
 * Express middleware factory that screens request content through the Tork
 * evaluate endpoint before the route handler runs.
 *
 * @param {object} [options]
 * @param {boolean} [options.blockUnsafe=true] - Reject unsafe requests with 400.
 * @param {boolean} [options.logOnly=false] - Log but never block.
 * @param {string}  [options.context] - Evaluation context sent to the API.
 * @param {boolean} [options.failClosed] - If true, return 503 when the safety
 *   check itself fails; otherwise fail open and let the request through.
 * @returns {Function} Express middleware (req, res, next).
 */
export function torkGuard(options = {}) {
  const { blockUnsafe = true, logOnly = false } = options;

  return async (req, res, next) => {
    // Skip if no body to evaluate
    if (!req.body?.message && !req.body?.content) {
      return next();
    }

    const content = req.body.message || req.body.content;

    try {
      const response = await fetch('https://tork.network/api/v1/evaluate', {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${process.env.TORK_API_KEY}`
        },
        body: JSON.stringify({
          content,
          model: req.body.model || 'gpt-4',
          context: options.context || 'api'
        })
      });

      // BUG FIX: the body was previously parsed without checking response.ok,
      // so an HTTP error (429, 500, ...) produced an object with no `safe`
      // field and the request was wrongly blocked as unsafe. Throw instead so
      // the catch below applies the configured fail-open/fail-closed policy.
      if (!response.ok) {
        throw new Error(`Tork API error: ${response.status}`);
      }

      const evaluation = await response.json();

      // Attach evaluation to request for downstream use
      req.torkEvaluation = evaluation;

      if (!evaluation.safe) {
        console.warn('Unsafe content detected:', {
          flags: evaluation.flags,
          riskScore: evaluation.riskScore,
          requestId: evaluation.requestId
        });

        if (blockUnsafe && !logOnly) {
          return res.status(400).json({
            error: 'Content blocked by safety filter',
            flags: evaluation.flags,
            requestId: evaluation.requestId
          });
        }
      }

      next();

    } catch (error) {
      console.error('Tork evaluation failed:', error);
      // Fail open or closed based on config
      if (options.failClosed) {
        return res.status(503).json({ error: 'Safety check unavailable' });
      }
      next();
    }
  };
}

Complete Express Example

A full example showing Tork integration in an Express chat API.

server.js
import express from 'express';
import { torkGuard } from './middleware/torkGuard.js';
// BUG FIX: evaluateContent is called below but was never imported.
import { evaluateContent } from './tork.js';

const app = express();
app.use(express.json());

// Apply Tork guard to all AI-related routes
app.use('/api/chat', torkGuard({ context: 'chatbot' }));
app.use('/api/generate', torkGuard({ context: 'content-generation' }));

// Chat endpoint with Tork protection
app.post('/api/chat', async (req, res) => {
  const { message, conversationId } = req.body;

  // Content already validated by middleware
  // req.torkEvaluation contains the full evaluation result

  try {
    // Your AI logic here (e.g., call OpenAI).
    // NOTE: generateAIResponse is a placeholder you must implement.
    const aiResponse = await generateAIResponse(message, conversationId);

    // Optionally evaluate AI response too
    const outputEval = await evaluateContent(aiResponse);

    if (!outputEval.safe) {
      return res.json({
        response: "I can't provide that information.",
        filtered: true
      });
    }

    res.json({ response: aiResponse });

  } catch (error) {
    res.status(500).json({ error: 'Failed to generate response' });
  }
});

app.listen(3000, () => {
  console.log('Server running with Tork protection');
});

TypeScript Support

Type definitions for better IDE support and type safety.

types/tork.ts
// types/tork.ts

/** Request payload accepted by POST /api/v1/evaluate. */
interface TorkEvaluationRequest {
  content: string;
  model?: string;
  context?: string;
}

/** Shape of a successful evaluation response. */
interface TorkEvaluationResponse {
  safe: boolean;
  confidence: number;
  flags: string[];
  riskScore: number;
  processingTime: number;
  requestId: string;
}

/**
 * Calls the Tork evaluate endpoint with a typed request/response contract.
 *
 * @param request - The evaluation request to submit.
 * @returns The parsed evaluation response.
 * @throws Error when the API responds with a non-2xx status.
 */
async function evaluateContent(
  request: TorkEvaluationRequest
): Promise<TorkEvaluationResponse> {
  const res = await fetch('https://tork.network/api/v1/evaluate', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${process.env.TORK_API_KEY}`
    },
    body: JSON.stringify(request)
  });

  if (res.ok) {
    return res.json();
  }

  throw new Error(`Tork API error: ${res.status}`);
}

Batch Processing

Evaluate multiple items while respecting rate limits.

javascript
// Process multiple items with rate limiting
/**
 * Evaluates a list of { content, context } items in fixed-size batches,
 * pausing between batches to stay under the API rate limit.
 *
 * @param {Array<{content: string, context?: string}>} items - Items to check.
 * @param {object} [opts]
 * @param {number} [opts.concurrency=5] - Items evaluated in parallel per batch.
 * @param {number} [opts.delayMs=100] - Pause between batches, in ms.
 * @returns {Promise<Array<object>>} One result per item, in input order;
 *   failed evaluations become { error, item } entries instead of rejecting.
 */
async function evaluateBatch(items, { concurrency = 5, delayMs = 100 } = {}) {
  const evaluated = [];
  const pause = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

  let offset = 0;
  while (offset < items.length) {
    const chunk = items.slice(offset, offset + concurrency);

    const settled = await Promise.all(
      chunk.map((entry) =>
        evaluateContent(entry.content, { context: entry.context })
          .catch((err) => ({ error: err.message, item: entry }))
      )
    );

    evaluated.push(...settled);
    offset += concurrency;

    // Respect rate limits — but skip the pause after the final batch.
    if (offset < items.length) {
      await pause(delayMs);
    }
  }

  return evaluated;
}

// Usage
const items = [
  { content: 'Message 1', context: 'chat' },
  { content: 'Message 2', context: 'chat' },
  // ...
];

const results = await evaluateBatch(items);
const unsafe = results.filter(r => !r.safe);

Best Practices

Evaluate both input and output

Check user messages before sending to AI, and AI responses before showing to users.

Use appropriate context

Set context parameter to 'chatbot', 'content-generation', or 'api' for better detection accuracy.

Implement retry logic

Use exponential backoff for 429 and 5xx errors. Don't retry 401 or 400 errors.

Log evaluation results

Store requestId for debugging and compliance. Track blocked content patterns.

Fail gracefully

Decide whether to fail open (allow on error) or fail closed (block on error) based on your risk tolerance.

Next Steps

Explore more endpoints for jailbreak detection, RAG validation, and multi-agent orchestration.

Documentation

Learn to integrate TORK

Upgrade Plan

Current: free

Support

Get help from our team