/**
 *
 * Please note:
 * This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
 * Do not edit this file manually.
 *
 */

@file:Suppress(
    "ArrayInDataClass",
    "EnumEntryName",
    "RemoveRedundantQualifierName",
    "UnusedImport"
)

package org.openapi.client.kotlin.openai.models

import com.squareup.moshi.Json
import java.math.BigDecimal

/**
 *
 *
 * @param model ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
 * @param messages The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
 * @param temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
 * @param topP An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
 * @param n How many chat completion choices to generate for each input message.
 * @param stream If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
 * @param stop Up to 4 sequences where the API will stop generating further tokens.
 * @param maxTokens The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
 * @param presencePenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
 * @param frequencyPenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
 * @param logitBias Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
 * @param user A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
 */
data class CreateChatCompletionRequest(

    /* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported. */
    @Json(name = "model")
    val model: kotlin.String,

    /* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction). */
    @Json(name = "messages")
    val messages: Array<ChatCompletionRequestMessage>,

    /* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both. */
    @Json(name = "temperature")
    val temperature: BigDecimal? = BigDecimal.ONE,

    /* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both. */
    @Json(name = "top_p")
    val topP: BigDecimal? = BigDecimal.ONE,

    /* How many chat completion choices to generate for each input message. */
    @Json(name = "n")
    val n: kotlin.Int? = 1,

    /* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. */
    @Json(name = "stream")
    val stream: kotlin.Boolean? = false,

    /* Up to 4 sequences where the API will stop generating further tokens. */
    @Json(name = "stop")
    val stop: Array<String>? = null,

    /* The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). */
    @Json(name = "max_tokens")
    val maxTokens: kotlin.Int? = null,

    /* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) */
    @Json(name = "presence_penalty")
    val presencePenalty: BigDecimal? = BigDecimal.ZERO,

    /* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details) */
    @Json(name = "frequency_penalty")
    val frequencyPenalty: BigDecimal? = BigDecimal.ZERO,

    /* Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. */
    @Json(name = "logit_bias")
    val logitBias: kotlin.Any? = null,

    /* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). */
    @Json(name = "user")
    val user: kotlin.String? = null

)
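
// ---------------------------------------------------------------------------
// Minimal usage sketch (separate file, not part of the generated model).
// Assumptions not confirmed by this file: the moshi-kotlin reflection
// artifact is on the classpath, and ChatCompletionRequestMessage was
// generated from the same spec with a lowercase `role` enum and a `content`
// string; adjust the names to whatever your generator actually produced.
// ---------------------------------------------------------------------------

import com.squareup.moshi.FromJson
import com.squareup.moshi.Moshi
import com.squareup.moshi.ToJson
import com.squareup.moshi.kotlin.reflect.KotlinJsonAdapterFactory
import java.math.BigDecimal
import org.openapi.client.kotlin.openai.models.ChatCompletionRequestMessage
import org.openapi.client.kotlin.openai.models.CreateChatCompletionRequest

// Moshi has no built-in BigDecimal support; the generated client normally
// registers a similar adapter in its infrastructure package. This hand-rolled
// version writes BigDecimal values as plain strings.
class BigDecimalAdapter {
    @ToJson fun toJson(value: BigDecimal): String = value.toPlainString()
    @FromJson fun fromJson(value: String): BigDecimal = BigDecimal(value)
}

fun main() {
    val request = CreateChatCompletionRequest(
        model = "gpt-3.5-turbo",
        messages = arrayOf(
            // Hypothetical constructor call; check the generated
            // ChatCompletionRequestMessage for the exact parameter and enum names.
            ChatCompletionRequestMessage(
                role = ChatCompletionRequestMessage.Role.user,
                content = "Say hello in one short sentence."
            )
        ),
        temperature = BigDecimal("0.2"), // lower temperature -> more deterministic output
        maxTokens = 64,
        user = "example-end-user-id"
    )

    val moshi = Moshi.Builder()
        .add(BigDecimalAdapter())
        .addLast(KotlinJsonAdapterFactory())
        .build()

    // The @Json annotations map Kotlin property names to their snake_case
    // wire names, e.g. maxTokens -> "max_tokens", topP -> "top_p".
    val json = moshi.adapter(CreateChatCompletionRequest::class.java).toJson(request)
    println(json)
}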