Enterprise: Video to Video Endpoint
Overview
The Video to Video endpoint allows you to generate a video from an existing video.
The resolution of the output is 1024x576
caution
Make sure you add your S3 details for the video
server, so you can receive the generated videos in your bucket.
Videos generated without S3 details being added will be deleted after 24 hours.
Request
--request POST 'https://modelslab.com/api/v1/enterprise/video/video2video' \
Make a POST
request to https://modelslab.com/api/v1/enterprise/video/video2video endpoint and pass the required parameters in the request body.
Body Attributes
Parameter | Description |
---|---|
key | Your API Key used for request authorization. |
model_id | The ID of the model to use. The available models include dark-sushi-mix, epicrealismnaturalsi, hellonijicute25d |
negative_prompt | Items you don't want in the video. |
seed | Seed is used to reproduce results; the same seed will give you the same result in return again. Pass null for a random number. |
height | Max height: 768px. |
width | Max width: 768px. |
init_video | Link to a valid mp4 or gif file to use as initial video conditioning. |
num_frames | Number of frames in generated video. Max: 25. Defaults to 16. |
num_inference_steps | Number of denoising steps. Max: 50. Defaults to 20. |
guidance_scale | Scale for classifier-free guidance. |
clip_skip | Number of CLIP layers to skip. 2 leads to more aesthetic defaults. Defaults to null. |
strength | Amount of variation you want between original video and final video. Higher values lead to more variation. Must be between 0 and 1. Defaults to 0.7. |
output_type | The output type could be mp4 ,gif . |
fps | Frames per second rate of generated video. |
lora_models | lora models to be used with the model id - default=null |
lora_strength | comma separated lora strengths - default=1.0 |
motion_loras | motion lora models to be used with the model id - default=null |
motion_lora_strength | comma separated motion lora strengths - default=1.0 |
domain_lora_scale | animate diff v3 scale - default=1.0 |
adapter_lora | motion model lora for v3 - default is v2_sd15_adapter |
upscale_height | height upscaling while inference - default=None max=1024 |
upscale_width | width upscaling while inference - default=None max=1024 |
upscale_strength | upscaling strength - default is 0.6 maximum is 1 |
upscale_guidance_scale | upscaling guidance scale - default is 15.0 |
upscale_num_inference_steps | num inference steps for upscaling - default is 20 |
motion_module | motion models - default is v2_sd15_mm . Other options include animatelcm ,v2_sd15_mm ,animateDiff-lightning |
instant_response | true if you'd like a response with future links for queued requests instantly instead of waiting for a fixed amount of time. Defaults to false . |
temp | true if you want to store your generations on our temporary storage. Temporary files are cleaned every 24 hours. Defaults to false . |
webhook | Set a URL to get a POST API call once the video generation is complete. |
track_id | This ID is returned in the response to the webhook API call. This will be used to identify the webhook request. |
Example
Body
Body
{
"key":"",
"model_id":"dark-sushi-mix",
"prompt":"fox playing ukulele on a boat floating on magma flowing under the boat",
"negative_prompt":"low quality",
"init_video":"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-output-1.gif",
"clip_skip":2,
"num_inference_steps":40,
"use_improved_sampling": false,
"guidance_scale":9.5,
"strength":0.8,
"base64":false,
"webhook":null,
"track_id": null
}
Request
- JS
- PHP
- NODE
- PYTHON
- JAVA
// JSON request body for the video2video endpoint.
// Fill in "key" with your API key before running.
const payload = {
  "key": "",
  "model_id": "dark-sushi-mix",
  "prompt": "fox playing ukulele on a boat floating on magma flowing under the boat",
  "negative_prompt": "low quality",
  "init_video": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-output-1.gif",
  "clip_skip": 2,
  "num_inference_steps": 40,
  "use_improved_sampling": false,
  "guidance_scale": 9.5,
  "strength": 0.8,
  "base64": false,
  "webhook": null,
  "track_id": null,
};

// POST the payload and log the raw response text.
fetch("https://modelslab.com/api/v1/enterprise/video/video2video", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify(payload),
  redirect: "follow",
})
  .then((response) => response.text())
  .then((result) => console.log(result))
  .catch((error) => console.log('error', error));
<?php
// JSON request body for the video2video endpoint.
// Fill in "key" with your API key before running.
$payload = [
    "key" => "",
    "model_id" => "dark-sushi-mix",
    "prompt" => "fox playing ukulele on a boat floating on magma flowing under the boat",
    "negative_prompt" => "low quality",
    "init_video" => "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-output-1.gif",
    "clip_skip" => 2,
    "num_inference_steps" => 40,
    "use_improved_sampling" => false,
    "guidance_scale" => 9.5,
    "strength" => 0.8,
    "base64" => false,
    "webhook" => null,
    "track_id" => null,
];

// Configure a cURL handle to POST the JSON payload.
$curl = curl_init();
curl_setopt_array($curl, [
    CURLOPT_URL => 'https://modelslab.com/api/v1/enterprise/video/video2video',
    CURLOPT_RETURNTRANSFER => true,      // return the body instead of printing it
    CURLOPT_ENCODING => '',
    CURLOPT_MAXREDIRS => 10,
    CURLOPT_TIMEOUT => 0,                // no timeout; generation may take a while
    CURLOPT_FOLLOWLOCATION => true,
    CURLOPT_HTTP_VERSION => CURL_HTTP_VERSION_1_1,
    CURLOPT_CUSTOMREQUEST => 'POST',
    CURLOPT_POSTFIELDS => json_encode($payload),
    CURLOPT_HTTPHEADER => [
        'Content-Type: application/json',
    ],
]);

// Execute the request and print the raw JSON response.
$response = curl_exec($curl);
curl_close($curl);
echo $response;
const request = require('request');

// JSON request body for the video2video endpoint.
// Fill in "key" with your API key before running.
const payload = {
  "key": "",
  "model_id": "dark-sushi-mix",
  "prompt": "fox playing ukulele on a boat floating on magma flowing under the boat",
  "negative_prompt": "low quality",
  "init_video": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-output-1.gif",
  "clip_skip": 2,
  "num_inference_steps": 40,
  "use_improved_sampling": false,
  "guidance_scale": 9.5,
  "strength": 0.8,
  "base64": false,
  "webhook": null,
  "track_id": null,
};

// POST the payload and print the response body.
const options = {
  'method': 'POST',
  'url': 'https://modelslab.com/api/v1/enterprise/video/video2video',
  'headers': {
    'Content-Type': 'application/json'
  },
  body: JSON.stringify(payload),
};

request(options, (error, response) => {
  if (error) throw new Error(error);
  console.log(response.body);
});
import requests
import json

URL = "https://modelslab.com/api/v1/enterprise/video/video2video"

# JSON request body for the video2video endpoint.
# Fill in "key" with your API key before running.
body = {
    "key": "",
    "model_id": "dark-sushi-mix",
    "prompt": "fox playing ukulele on a boat floating on magma flowing under the boat",
    "negative_prompt": "low quality",
    "init_video": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-output-1.gif",
    "clip_skip": 2,
    "num_inference_steps": 40,
    "use_improved_sampling": False,
    "guidance_scale": 9.5,
    "strength": 0.8,
    "base64": False,
    "webhook": None,
    "track_id": None,
}

# POST the payload and print the raw JSON response.
response = requests.post(
    URL,
    headers={"Content-Type": "application/json"},
    data=json.dumps(body),
)
print(response.text)
// Build an OkHttp client for a synchronous call to the video2video endpoint.
OkHttpClient client = new OkHttpClient().newBuilder()
.build();
MediaType mediaType = MediaType.parse("application/json");
// JSON request body; the empty "key" must be replaced with your API key before running.
RequestBody body = RequestBody.create(mediaType, "{\n \"key\":\"\",\n \"model_id\":\"dark-sushi-mix\",\n \"prompt\":\"fox playing ukulele on a boat floating on magma flowing under the boat\",\n \"negative_prompt\":\"low quality\",\n \"init_video\":\"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/animatediff-vid2vid-output-1.gif\",\n \"clip_skip\":2,\n \"num_inference_steps\":40,\n \"use_improved_sampling\": false,\n \"guidance_scale\":9.5,\n \"strength\":0.8,\n \"base64\":false,\n \"webhook\": null,\n \"track_id\": null\n}");
// POST the payload to the enterprise video2video endpoint.
Request request = new Request.Builder()
.url("https://modelslab.com/api/v1/enterprise/video/video2video")
.method("POST", body)
.addHeader("Content-Type", "application/json")
.build();
// Execute synchronously; the response body contains the generation result JSON.
Response response = client.newCall(request).execute();
Response
Example Response
{
"status": "success",
"generationTime": 8.49,
"id": 500,
"output": [
"https://pub-3626123a908346a7a8be8d9295f44e26.r2.dev/generations/05ffff8d-a0ba-4019-9df0-5de966c52ad5.gif"
],
"proxy_links": [
"https://cdn2.stablediffusionapi.com/generations/05ffff8d-a0ba-4019-9df0-5de966c52ad5.gif"
],
"meta": {
"base64": "no",
"clip_skip": null,
"file_prefix": "05ffff8d-a0ba-4019-9df0-5de966c52ad5",
"fps": 7,
"guidance_scale": 7,
"height": 512,
"init_video": "https://pub-3626123a908346a7a8be8d9295f44e26.r2.dev/generations/af5057ce-2a53-4d8f-bdb7-f0e5a6d4064c.mp4",
"instant_response": "no",
"model_id": "midjourney",
"negative_prompt": "low quality",
"num_frames": 16,
"num_inference_steps": 20,
"output_type": "gif",
"prompt": "An astronaut riding a horse",
"seed": 3276424082,
"strength": 0.7,
"temp": "no",
"width": 512
}
}