Configuration parameters
Influence your model's output at inference time with configuration parameters such as `temperature` and `max_tokens`.
# Request to OpenAI, tuning the output at inference time to be more creative
# (higher temperature) and capping the response length with max_tokens.
# This example is for v1+ of the openai SDK: https://pypi.org/project/openai/
import os

from openai import OpenAI

# Route requests through the GPTBoost proxy; read the API key from the
# environment rather than hard-coding it.
client = OpenAI(
    base_url="https://turbo.gptboost.io/v1",
    api_key=os.getenv("OPENAI_API_KEY"),
)

response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "user", "content": "Tell me an interesting fact about zebras"},
    ],
    temperature=0.9,  # 0–2; higher values make the output more random/creative
    max_tokens=256,   # hard cap on the number of tokens in the completion
)
print(response.choices[0].message.content)

# Equivalent request via curl. Note the DOUBLE quotes around the Authorization
# header: with the original single quotes the shell does not expand
# $OPENAI_API_KEY, and the literal text "Bearer $OPENAI_API_KEY" is sent.
curl --location 'https://turbo.gptboost.io/v1/chat/completions' \
--header "Authorization: Bearer $OPENAI_API_KEY" \
--header 'Content-Type: application/json' \
--data '{
    "model": "gpt-3.5-turbo",
    "messages": [
        {
            "role": "user",
            "content": "How to cheerfully greet a girl in Spanish!"
        }
    ],
    "temperature": 0.9,
    "max_tokens": 33,
    "frequency_penalty": 0,
    "presence_penalty": 0
}'

Last updated