🎵 NEW: Song Generator API powered by ACE-Step v1.5 - Create full songs with vocals in 50+ languages (30s-8min) 🎶 · Try it now · Read Guide
Official TypeScript/JavaScript client for the ModelsLab API - generate images, videos, audio, and more.
npm install modelslab
yarn add modelslab
// Quick start: generate one image from a text prompt via the Community service.
import { Client, Community } from "modelslab";
const client = new Client("your-api-key");
const community = new Community(client.key);
// NOTE(review): the key is passed both to the Community constructor and in the
// request payload — confirm whether the SDK requires both.
const result = await community.textToImage({
key: client.key,
prompt: "A beautiful sunset over mountains, photorealistic, 8k",
model_id: "flux",
width: 512,
height: 512,
samples: 1,
num_inference_steps: 30,
guidance_scale: 7.5
});
// Requests either complete immediately ("success") or are queued ("processing").
if (result.status === "success") {
console.log("Generated image:", result.output[0]);
} else if (result.status === "processing") {
// Keep result.id — it is needed to look up the finished output later.
console.log("Processing, request ID:", result.id);
}
// Three ways to construct the API client.
import { Client } from "modelslab";
// Method 1: Direct API key
const client = new Client("your-api-key");
// Method 2: Environment variable
// Reads from process.env.API_KEY when called without args
const clientFromEnv = new Client();
// Method 3: With custom settings
// new Client(apiKey, retries, timeoutSeconds)
const clientCustom = new Client("your-api-key", 5, 10);
// A client exposes its resolved configuration:
console.log(client.baseUrl); // API base URL
console.log(client.fetchRetry); // Number of retries
console.log(client.fetchTimeout); // Timeout in seconds
// Each API area is wrapped by its own service class; all take the same API key.
import { Community, Audio, Video, ImageEditing } from "modelslab";
// Create instances with your API key
const community = new Community(client.key);
const audio = new Audio(client.key);
const video = new Video(client.key);
const imageEditing = new ImageEditing(client.key);
// Pass true as second argument for enterprise mode
const enterpriseCommunity = new Community(client.key, true);
// Uses endpoints like: https://modelslab.com/api/v1/enterprise/images/
// Text-to-image with the full set of common parameters.
import { Client, Community } from "modelslab";
const client = new Client("your-api-key");
const community = new Community(client.key);
const result = await community.textToImage({
key: client.key,
prompt: "A cyberpunk city at night, neon lights, rain, cinematic",
negative_prompt: "blurry, low quality, distorted", // what to avoid in the output
model_id: "flux",
width: 1024,
height: 1024,
samples: 1,
num_inference_steps: 30,
guidance_scale: 7.5,
seed: 12345 // Optional: for reproducible results
});
console.log("Generated image:", result.output[0]);
// Image-to-image: restyle an existing image guided by a prompt.
const result = await community.imageToImage({
key: client.key,
prompt: "Transform into a watercolor painting style",
negative_prompt: "photo, realistic",
init_image: "https://example.com/your-image.jpg", // source image URL
model_id: "flux",
strength: 0.7, // How much to change (0-1)
width: 512,
height: 512,
num_inference_steps: 30,
guidance_scale: 7.5
});
console.log("Transformed image:", result.output[0]);
// Inpainting: regenerate only the masked region of an image.
const result = await community.inpainting({
key: client.key,
prompt: "A red sports car",
init_image: "https://example.com/image.jpg",
mask_image: "https://example.com/mask.png", // White = edit, Black = keep
model_id: "flux",
width: 512,
height: 512,
num_inference_steps: 30,
guidance_scale: 7.5
});
console.log("Inpainted image:", result.output[0]);
// ControlNet: condition generation on a structural guide image (edges, depth, pose).
const result = await community.controlnet({
key: client.key,
prompt: "A beautiful house, photorealistic, sunny day",
controlnet_model: "canny", // canny, depth, pose, etc.
controlnet_image: "https://example.com/edges.jpg", // the guide image
controlnet_conditioning_scale: 1.0, // how strongly the guide constrains output
model_id: "flux",
width: 512,
height: 512,
num_inference_steps: 30,
guidance_scale: 7.5
});
console.log("ControlNet result:", result.output[0]);
// Audio service: generate ambient audio and speech from text.
import { Client, Audio } from "modelslab";
const client = new Client("your-api-key");
const audio = new Audio(client.key);
// Text-to-audio: generate sound/music from a description.
const result = await audio.textToAudio({
key: client.key,
prompt: "A calm piano melody with soft strings in the background",
duration: 30 // Duration in seconds
});
console.log("Audio URL:", result.output[0]);
// Text-to-speech: synthesize spoken audio with a named voice.
const result = await audio.textToSpeech({
key: client.key,
text: "Hello! Welcome to ModelsLab. This is a sample of our text-to-speech API.",
voice_id: "alloy", // Choose from available voices
language: "en"
});
console.log("Speech URL:", result.output[0]);
// Voice-to-voice: re-speak init_audio in the voice sampled from target_audio.
const result = await audio.voice2voice({
key: client.key,
init_audio: "https://example.com/source-speech.mp3", // speech to convert
target_audio: "https://example.com/voice-to-clone.mp3" // voice to clone
});
console.log("Cloned voice URL:", result.output[0]);
// Voice cover: re-sing a song with a different voice.
const result = await audio.voiceCover({
key: client.key,
init_audio: "https://example.com/song.mp3",
voice_id: "celebrity-voice-id"
});
console.log("Voice cover URL:", result.output[0]);
// Music generation from a text description.
const result = await audio.musicGen({
key: client.key,
prompt: "Upbeat electronic dance music with heavy bass drops and synth melodies",
duration: 30
});
console.log("Generated music:", result.output[0]);
// Lyrics generation: returns text, not an audio URL.
const result = await audio.lyricsGen({
key: client.key,
prompt: "Write lyrics for a pop song about summer love and road trips"
});
console.log("Generated lyrics:", result.output);
// Full song generation (prompt plus optional custom lyrics).
const result = await audio.songGenerator({
key: client.key,
prompt: "A country ballad about hometown memories",
lyrics: "Optional: your custom lyrics here"
});
console.log("Generated song:", result.output[0]);
// Speech-to-text: transcribe an audio file. Note the transcription is on
// result.text rather than result.output.
const result = await audio.speechToText({
key: client.key,
audio: "https://example.com/speech.mp3",
language: "en"
});
console.log("Transcription:", result.text);
// Sound-effect generation from a description.
const result = await audio.sfxGen({
key: client.key,
prompt: "Thunder rolling in the distance with heavy rain on a metal roof",
duration: 10
});
console.log("Sound effect URL:", result.output[0]);
// Video service: text-to-video. Video jobs usually queue, so handle "processing".
import { Client, Video } from "modelslab";
const client = new Client("your-api-key");
const video = new Video(client.key);
const result = await video.textToVideo({
key: client.key,
model_id: "cogvideox",
prompt: "A spaceship flying through an asteroid field, cinematic, 4K",
negative_prompt: "low quality, blurry, static",
width: 512,
height: 512,
num_frames: 25, // length of the clip in frames
num_inference_steps: 20,
guidance_scale: 7
});
// Video generation is async
if (result.status === "processing") {
// Keep result.id to look up the finished video later.
console.log("Processing, request ID:", result.id);
console.log("ETA:", result.eta, "seconds");
} else if (result.status === "success") {
console.log("Video URL:", result.output[0]);
}
// Image-to-video: animate a still image guided by a motion prompt.
const result = await video.imageToVideo({
key: client.key,
model_id: "cogvideox",
init_image: "https://example.com/landscape.jpg", // still frame to animate
prompt: "Clouds moving slowly, birds flying in the distance",
num_frames: 25,
num_inference_steps: 20,
guidance_scale: 7
});
console.log("Result:", result);
// Image-editing service: background removal and upscaling.
import { Client, ImageEditing } from "modelslab";
const client = new Client("your-api-key");
const imageEditing = new ImageEditing(client.key);
// Remove the background, leaving the subject.
const result = await imageEditing.backgroundRemover({
key: client.key,
image: "https://example.com/photo.jpg"
});
console.log("Image without background:", result.output[0]);
// Upscale a low-resolution image.
const result = await imageEditing.superResolution({
key: client.key,
image: "https://example.com/low-res-image.jpg",
scale: 4 // 2x or 4x upscale
});
console.log("Upscaled image:", result.output[0]);
// Outpainting: extend an image beyond its original borders.
const result = await imageEditing.outpainting({
key: client.key,
image: "https://example.com/photo.jpg",
prompt: "Continue the landscape with mountains and trees",
width: 1024, // New width (larger than original)
height: 768 // New height
});
console.log("Extended image:", result.output[0]);
// Object removal: erase whatever the mask marks.
const result = await imageEditing.objectRemover({
key: client.key,
image: "https://example.com/photo.jpg",
mask_image: "https://example.com/mask.png" // White areas will be removed
});
console.log("Object removed:", result.output[0]);
// Virtual try-on: render the clothing item on the model photo.
const result = await imageEditing.fashion({
key: client.key,
model_image: "https://example.com/person.jpg",
cloth_image: "https://example.com/shirt.jpg"
});
console.log("Virtual try-on result:", result.output[0]);
// Face generation/editing guided by a prompt.
const result = await imageEditing.facegen({
key: client.key,
image: "https://example.com/portrait.jpg",
prompt: "Make the person look 10 years younger"
});
console.log("Face generation result:", result.output[0]);
// Inpainting via the editing service — note the parameter here is `image`,
// unlike the Community inpainting call which takes `init_image`.
const result = await imageEditing.inpainting({
key: client.key,
image: "https://example.com/photo.jpg",
mask_image: "https://example.com/mask.png",
prompt: "A beautiful garden with flowers"
});
console.log("Inpainting result:", result.output[0]);
// Standard headshot
const result = await imageEditing.headshot({
key: client.key,
image: "https://example.com/selfie.jpg", // source photo of the person
prompt: "Professional headshot, studio lighting, business attire"
});
console.log("Headshot:", result.output[0]);
// FLUX-powered headshot (higher quality)
const fluxResult = await imageEditing.fluxHeadshot({
key: client.key,
image: "https://example.com/selfie.jpg",
prompt: "Professional LinkedIn headshot, neutral background"
});
console.log("FLUX Headshot:", fluxResult.output[0]);
// Error-handling pattern: wrap a generation call, surface API errors as exceptions.
import { Client, Community } from "modelslab";
/**
 * Generate an image and return its URL, or queue info if still processing.
 *
 * NOTE(review): the return type is inconsistent — a string URL on "success"
 * but an object { requestId, eta } on "processing". Callers (see usage below)
 * treat the result as a URL; confirm whether queued results should instead be
 * polled here before returning.
 *
 * @param {string} prompt - Text description of the image.
 * @param {string} apiKey - ModelsLab API key.
 * @returns {Promise<string|{requestId: *, eta: *}>}
 * @throws {Error} When the API reports a failure status.
 */
async function generateImage(prompt, apiKey) {
try {
const client = new Client(apiKey);
const community = new Community(client.key);
const result = await community.textToImage({
key: client.key,
prompt: prompt,
model_id: "flux",
width: 512,
height: 512,
samples: 1
});
if (result.status === "success") {
return result.output[0];
} else if (result.status === "processing") {
// Still queued: hand back the ID and ETA so the caller can poll.
return { requestId: result.id, eta: result.eta };
} else {
// Any other status is treated as an error; fall back to a generic message.
throw new Error(result.message || "Unknown error");
}
} catch (error) {
// Log for visibility, then rethrow so the caller can decide how to recover.
console.error("Error generating image:", error.message);
throw error;
}
}
// Usage
try {
const imageUrl = await generateImage("A sunset over mountains", "your-api-key");
console.log("Generated:", imageUrl);
} catch (error) {
console.error("Failed:", error.message);
}
/**
 * Generate a video and poll until the result is ready.
 *
 * Fix: the original polling loop was an empty placeholder — it slept in
 * 5-second intervals without ever checking job status, so the function always
 * timed out even when the video finished. The loop now polls the fetch
 * endpoint with the queued request ID.
 *
 * @param {string} prompt - Text description of the video to generate.
 * @param {string} apiKey - ModelsLab API key.
 * @param {number} [timeout=300000] - Maximum total wait in milliseconds.
 * @param {number} [pollIntervalMs=5000] - Delay between status checks.
 * @returns {Promise<string>} URL of the generated video.
 * @throws {Error} If generation fails or the timeout elapses.
 */
async function generateVideoWithPolling(prompt, apiKey, timeout = 300000, pollIntervalMs = 5000) {
  const client = new Client(apiKey);
  const video = new Video(client.key);
  const result = await video.textToVideo({
    key: client.key,
    model_id: "cogvideox",
    prompt: prompt,
    width: 512,
    height: 512,
    num_frames: 25
  });
  if (result.status === "success") {
    return result.output[0];
  }
  if (result.status !== "processing") {
    throw new Error(result.message || "Generation failed");
  }
  const requestId = result.id;
  const startTime = Date.now();
  while (Date.now() - startTime < timeout) {
    // Wait between polls so we don't hammer the API.
    await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
    // NOTE(review): confirm the fetch-endpoint path against the current
    // ModelsLab API docs; v6 video fetch takes the request ID in the URL
    // and the API key in the body.
    const response = await fetch(`https://modelslab.com/api/v6/video/fetch/${requestId}`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ key: client.key })
    });
    const status = await response.json();
    if (status.status === "success") {
      return status.output[0];
    }
    if (status.status !== "processing") {
      throw new Error(status.message || "Generation failed");
    }
  }
  throw new Error("Timeout waiting for video generation");
}
Was this page helpful?