Completions
POST /v1/chat/completions
Generate a chat completion synchronously using the provided messages and model parameters.
Authorizations
Authenticate by passing your API key in the authorization header, as shown in the example request below.
Body
Request body (application/json). See the example request below for the full message, tool, and streaming option shapes.
model · string · enum · Required
reasoning_effort · string · enum · Optional · Default: medium
frequency_penalty · number · min: -2 · max: 2 · Optional · Default: 0
max_completion_tokens · number · min: -1 · max: 600 · Optional · Default: 128
n · number · enum · Optional · Default: 1
presence_penalty · number · min: -2 · max: 2 · Optional · Default: 0
seed · number · Optional · Default: 106948
service_tier · string · enum · Optional · Default: default
stop · string[] · Optional
stream · boolean · Optional · Default: false (see the streaming sketch after the example request)
temperature · number · max: 2 · Optional · Default: 1
top_p · number · max: 1 · Optional · Default: 1
tool_choice · any of: string · enum · Optional
parallel_tool_calls · boolean · Optional · Default: true
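Below is a minimal sketch of calling this endpoint from Python with the requests library. It is not an official client: the base URL and authorization header mirror the example request further down the page, and the model name and messages are placeholders to replace with your own.

import requests

API_KEY = "YOUR_API_KEY"  # placeholder: your real key

# Minimal non-streaming request: only model and messages are set explicitly;
# every other field falls back to the defaults listed above.
payload = {
    "model": "Llama-3.1 8B",
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello in one sentence."},
    ],
    "temperature": 1,
    "max_completion_tokens": 128,
}

resp = requests.post(
    "https://api.distribute.ai/v1/chat/completions",
    headers={"authorization": API_KEY, "Content-Type": "application/json"},
    json=payload,
    timeout=60,
)
print(resp.status_code)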
Responses
200 · application/json
Standard OpenAI chat completion response containing the generated message and metadata.
400 · application/json · Default Response
404 · application/json · Default Response
500 · application/json · Default Response
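Error responses also arrive as application/json, but their exact shape is not spelled out on this page. A hedged sketch of handling the status codes above, where resp is the requests.Response from the call sketched earlier:

# Branch on the status code only; print whatever JSON body the server
# returned, since the error schema is not documented here.
if resp.status_code == 200:
    data = resp.json()  # chat completion object, see the 200 example below
elif resp.status_code in (400, 404):
    print("client error", resp.status_code, resp.json())
elif resp.status_code == 500:
    print("server error, consider retrying:", resp.json())
else:
    resp.raise_for_status()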
Example request
POST /v1/chat/completions HTTP/1.1
Host: api.distribute.ai
authorization: YOUR_API_KEY
Content-Type: application/json
Accept: */*
Content-Length: 584
{
  "messages": [
    {
      "role": "system",
      "content": "text",
      "name": "text"
    }
  ],
  "model": "Llama-3.1 8B",
  "reasoning_effort": "medium",
  "metadata": {
    "ANY_ADDITIONAL_PROPERTY": "text"
  },
  "frequency_penalty": 0,
  "max_completion_tokens": 128,
  "n": 1,
  "modalities": [
    "text"
  ],
  "presence_penalty": 0,
  "seed": 106948,
  "service_tier": "default",
  "stop": [
    "text"
  ],
  "stream": false,
  "stream_options": {
    "include_usage": true
  },
  "temperature": 1,
  "top_p": 1,
  "tools": [
    {
      "type": "function",
      "function": {
        "description": "text",
        "name": "text",
        "parameters": {
          "ANY_ADDITIONAL_PROPERTY": "text"
        },
        "strict": false
      }
    }
  ],
  "tool_choice": "none",
  "parallel_tool_calls": true
}
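The example above sends stream: false. With stream set to true the completion should arrive incrementally; the sketch below assumes an OpenAI-style server-sent-events stream (data: lines carrying chunk objects and a final data: [DONE]), which this page does not confirm, so treat the parsing as illustrative.

import json
import requests

payload = {
    "model": "Llama-3.1 8B",
    "messages": [{"role": "user", "content": "Stream a short greeting."}],
    "stream": True,
    "stream_options": {"include_usage": True},  # ask for a final usage chunk
}

with requests.post(
    "https://api.distribute.ai/v1/chat/completions",
    headers={"authorization": "YOUR_API_KEY"},
    json=payload,
    stream=True,
    timeout=60,
) as resp:
    for raw in resp.iter_lines():
        if not raw:
            continue
        line = raw.decode("utf-8")
        if not line.startswith("data: "):
            continue
        body = line[len("data: "):]
        if body == "[DONE]":
            break
        chunk = json.loads(body)
        # Each chunk is assumed to carry a delta with the next slice of text;
        # the final usage-only chunk may have an empty choices list.
        if chunk.get("choices"):
            delta = chunk["choices"][0].get("delta", {})
            print(delta.get("content") or "", end="", flush=True)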
Example 200 response
{
  "id": "text",
  "choices": [
    {
      "finish_reason": "stop",
      "index": 1,
      "message": {
        "content": "text",
        "tool_calls": [
          {
            "id": "text",
            "type": "function",
            "function": {
              "name": "text",
              "arguments": "text"
            }
          }
        ],
        "role": "system",
        "name": "text"
      }
    }
  ],
  "created": 1,
  "model": "Llama-3.1 8B",
  "service_tier": "text",
  "system_fingerprint": "text",
  "object": "chat.completion",
  "usage": {
    "completion_tokens": 1,
    "prompt_tokens": 1,
    "total_tokens": 1
  }
}
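Reading the reply out of the 200 body above is a matter of indexing into choices[0]. A short sketch, assuming data is the decoded JSON from a successful call:

choice = data["choices"][0]
message = choice["message"]

print("finish_reason:", choice["finish_reason"])
print("assistant said:", message.get("content"))

# If the model requested a tool call, the arguments arrive as a JSON string.
for call in message.get("tool_calls") or []:
    fn = call["function"]
    print("tool call:", fn["name"], "args:", fn["arguments"])

usage = data.get("usage", {})
print("total tokens:", usage.get("total_tokens"))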