Skip to content

Commit 8ba663f

Browse files
committed
add ChatCompletion and Whisper endpoints, remove test dependency
1 parent 89d32e9 commit 8ba663f

17 files changed

+481
-38
lines changed

build.gradle

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
11
group 'org.openapi.client.kotlin.openai'
2-
version '2.1.3'
2+
version '4.0.0' // since GPT-4 is available
33

44
buildscript {
55
ext.kotlin_version = '1.8.10'
66
ext.kotlin_coroutine_version = '1.6.4'
7-
ext.kotlintest_junit4_version = '3.4.2'
7+
// ext.kotlintest_junit4_version = '3.4.2'
88
ext.retrofitVersion = '2.9.0'
99
ext.moshi_version = '1.14.0'
1010
ext.okhttp4_version = '4.10.0'
@@ -65,5 +65,5 @@ dependencies {
6565
implementation "com.squareup.retrofit2:retrofit:$retrofitVersion"
6666
implementation "com.squareup.retrofit2:converter-moshi:$retrofitVersion"
6767
implementation "com.squareup.retrofit2:converter-scalars:$retrofitVersion"
68-
testImplementation "io.kotlintest:kotlintest-runner-junit5:$kotlintest_junit4_version"
68+
// testImplementation "io.kotlintest:kotlintest-runner-junit5:$kotlintest_junit4_version"
6969
}

src/main/kotlin/org/openapi/client/kotlin/openai/apis/OpenAIApi.kt

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,18 @@ interface OpenAIApi {
3333
@POST("answers")
3434
suspend fun createAnswer(@Body createAnswerRequest: CreateAnswerRequest): Response<CreateAnswerResponse>
3535

36+
/**
 * Creates a completion for the chat message.
 *
 * Sends the conversation in [CreateChatCompletionRequest.messages] to the
 * `chat/completions` endpoint and returns the model's reply.
 *
 * Responses:
 *  - 200: OK
 *
 * @param createChatCompletionRequest model id, chat messages and sampling options for the completion
 * @return [CreateChatCompletionResponse] wrapped in a Retrofit [Response]
 */
@POST("chat/completions")
suspend fun createChatCompletion(@Body createChatCompletionRequest: CreateChatCompletionRequest): Response<CreateChatCompletionResponse>
47+
3648
/**
3749
* Classifies the specified &#x60;query&#x60; using provided examples. The endpoint first [searches](/docs/api-reference/searches) over the labeled examples to select the ones most relevant for the particular query. Then, the relevant examples are combined with the query to construct a prompt to produce the final label via the [completions](/docs/api-reference/completions) endpoint. Labeled examples can be provided via an uploaded &#x60;file&#x60;, or explicitly listed in the request using the &#x60;examples&#x60; parameter for quick tests and small scale use cases.
3850
*
@@ -220,6 +232,41 @@ interface OpenAIApi {
220232
@POST("engines/{engine_id}/search")
221233
suspend fun createSearch(@Path("engine_id") engineId: kotlin.String, @Body createSearchRequest: CreateSearchRequest): Response<CreateSearchResponse>
222234

235+
/**
 * Transcribes audio into the input language.
 *
 * Sent as a multipart/form-data request; the audio goes in the `file` part and
 * every other argument is an additional form part.
 *
 * Responses:
 *  - 200: OK
 *
 * @param file The audio file to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
 * @param model ID of the model to use. Only `whisper-1` is currently available.
 * @param prompt An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. (optional)
 * @param responseFormat The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. (optional, default to "json")
 * @param temperature The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. (optional, default to 0)
 * @param language The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. (optional)
 * @return [CreateTranscriptionResponse]
 */
@Multipart
@POST("audio/transcriptions")
suspend fun createTranscription(@Part file: MultipartBody.Part, @Part("model") model: kotlin.String, @Part("prompt") prompt: kotlin.String? = null, @Part("response_format") responseFormat: kotlin.String? = "json", @Part("temperature") temperature: java.math.BigDecimal? = java.math.BigDecimal("0"), @Part("language") language: kotlin.String? = null): Response<CreateTranscriptionResponse>
252+
253+
/**
 * Translates audio into English.
 *
 * Sent as a multipart/form-data request; unlike [createTranscription] there is
 * no `language` part, since the output language is always English.
 *
 * Responses:
 *  - 200: OK
 *
 * @param file The audio file to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
 * @param model ID of the model to use. Only `whisper-1` is currently available.
 * @param prompt An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English. (optional)
 * @param responseFormat The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. (optional, default to "json")
 * @param temperature The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. (optional, default to 0)
 * @return [CreateTranslationResponse]
 */
@Multipart
@POST("audio/translations")
suspend fun createTranslation(@Part file: MultipartBody.Part, @Part("model") model: kotlin.String, @Part("prompt") prompt: kotlin.String? = null, @Part("response_format") responseFormat: kotlin.String? = "json", @Part("temperature") temperature: java.math.BigDecimal? = java.math.BigDecimal("0")): Response<CreateTranslationResponse>
269+
223270
/**
224271
* Delete a file.
225272
*

src/main/kotlin/org/openapi/client/kotlin/openai/infrastructure/UUIDAdapter.kt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@ package org.openapi.client.kotlin.openai.infrastructure
22

33
import com.squareup.moshi.FromJson
44
import com.squareup.moshi.ToJson
5-
import java.util.*
5+
import java.util.UUID
66

77
class UUIDAdapter {
88
@ToJson
Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
/**
2+
*
3+
* Please note:
4+
* This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
5+
* Do not edit this file manually.
6+
*
7+
*/
8+
9+
@file:Suppress(
10+
"ArrayInDataClass",
11+
"EnumEntryName",
12+
"RemoveRedundantQualifierName",
13+
"UnusedImport"
14+
)
15+
16+
package org.openapi.client.kotlin.openai.models
17+
18+
19+
import com.squareup.moshi.Json
20+
21+
/**
 * One message of the conversation sent in a chat completion request.
 *
 * @param role The role of the author of this message.
 * @param content The contents of the message
 * @param name The name of the user in a multi-user chat
 */
data class ChatCompletionRequestMessage(

    /* The role of the author of this message. */
    @Json(name = "role")
    val role: Role,

    /* The contents of the message */
    @Json(name = "content")
    val content: String,

    /* The name of the user in a multi-user chat */
    @Json(name = "name")
    val name: String? = null,
) {

    /**
     * The role of the author of this message.
     *
     * Values: system,user,assistant
     */
    enum class Role(val value: String) {
        @Json(name = "system") system("system"),
        @Json(name = "user") user("user"),
        @Json(name = "assistant") assistant("assistant"),
    }
}
57+
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
/**
2+
*
3+
* Please note:
4+
* This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
5+
* Do not edit this file manually.
6+
*
7+
*/
8+
9+
@file:Suppress(
10+
"ArrayInDataClass",
11+
"EnumEntryName",
12+
"RemoveRedundantQualifierName",
13+
"UnusedImport"
14+
)
15+
16+
package org.openapi.client.kotlin.openai.models
17+
18+
19+
import com.squareup.moshi.Json
20+
21+
/**
 * One message produced by the model in a chat completion response.
 *
 * @param role The role of the author of this message.
 * @param content The contents of the message
 */
data class ChatCompletionResponseMessage(

    /* The role of the author of this message. */
    @Json(name = "role")
    val role: Role,

    /* The contents of the message */
    @Json(name = "content")
    val content: String,
) {

    /**
     * The role of the author of this message.
     *
     * Values: system,user,assistant
     */
    enum class Role(val value: String) {
        @Json(name = "system") system("system"),
        @Json(name = "user") user("user"),
        @Json(name = "assistant") assistant("assistant"),
    }
}
52+

src/main/kotlin/org/openapi/client/kotlin/openai/models/CreateAnswerRequest.kt

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@
1616
package org.openapi.client.kotlin.openai.models
1717

1818
import com.squareup.moshi.Json
19+
import java.math.BigDecimal
1920

2021
/**
2122
*
@@ -28,7 +29,7 @@ import com.squareup.moshi.Json
2829
* @param file The ID of an uploaded file that contains documents to search over. See [upload file](/docs/api-reference/files/upload) for how to upload a file of the desired format and purpose. You should specify either `documents` or a `file`, but not both.
2930
* @param searchModel ID of the model to use for [Search](/docs/api-reference/searches/create). You can select one of `ada`, `babbage`, `curie`, or `davinci`.
3031
* @param maxRerank The maximum number of documents to be ranked by [Search](/docs/api-reference/searches/create) when using `file`. Setting it to a higher value leads to improved accuracy but with increased latency and cost.
31-
* @param temperature What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values mean the model will take more risks and value 0 (argmax sampling) works better for scenarios with a well-defined answer.
32+
* @param temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
3233
* @param logprobs Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs.
3334
* @param maxTokens The maximum number of tokens allowed for the generated answer
3435
* @param stop
@@ -53,7 +54,7 @@ data class CreateAnswerRequest (
5354

5455
/* List of (question, answer) pairs that will help steer the model towards the tone and answer format you'd like. We recommend adding 2 to 3 examples. */
5556
@Json(name = "examples")
56-
val examples: kotlin.collections.List<kotlin.collections.List<kotlin.String>>,
57+
val examples: List<List<kotlin.String>>,
5758

5859
/* A text snippet containing the contextual information used to generate the answers for the `examples` you provide. */
5960
@Json(name = "examples_context")
@@ -75,9 +76,9 @@ data class CreateAnswerRequest (
7576
@Json(name = "max_rerank")
7677
val maxRerank: kotlin.Int? = 200,
7778

78-
/* What [sampling temperature](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277) to use. Higher values mean the model will take more risks and value 0 (argmax sampling) works better for scenarios with a well-defined answer. */
79+
/* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. */
7980
@Json(name = "temperature")
80-
val temperature: java.math.BigDecimal? = java.math.BigDecimal("0"),
81+
val temperature: BigDecimal? = BigDecimal.ZERO,
8182

8283
/* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. If you need more than this, please contact us through our [Help center](https://help.openai.com) and describe your use case. When `logprobs` is set, `completion` will be automatically added into `expand` to get the logprobs. */
8384
@Json(name = "logprobs")
@@ -108,7 +109,7 @@ data class CreateAnswerRequest (
108109

109110
/* If an object name is in the list, we provide the full information of the object; otherwise, we only provide the object ID. Currently we support `completion` and `file` objects for expansion. */
110111
@Json(name = "expand")
111-
val expand: kotlin.collections.List<kotlin.Any>? = arrayListOf(),
112+
val expand: List<kotlin.Any>? = arrayListOf(),
112113

113114
/* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). */
114115
@Json(name = "user")
Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
1+
/**
2+
*
3+
* Please note:
4+
* This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
5+
* Do not edit this file manually.
6+
*
7+
*/
8+
9+
@file:Suppress(
10+
"ArrayInDataClass",
11+
"EnumEntryName",
12+
"RemoveRedundantQualifierName",
13+
"UnusedImport"
14+
)
15+
16+
package org.openapi.client.kotlin.openai.models
17+
18+
import com.squareup.moshi.Json
19+
import java.math.BigDecimal
20+
21+
/**
 * Request body for the `chat/completions` endpoint.
 *
 * @param model ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
 * @param messages The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
 * @param temperature What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
 * @param topP An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
 * @param n How many chat completion choices to generate for each input message.
 * @param stream If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
 * @param stop Up to 4 sequences where the API will stop generating further tokens.
 * @param maxTokens The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens).
 * @param presencePenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
 * @param frequencyPenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
 * @param logitBias Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
 * @param user A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
 */
data class CreateChatCompletionRequest(

    /* ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported. */
    @Json(name = "model")
    val model: String,

    /* The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction). */
    @Json(name = "messages")
    val messages: Array<ChatCompletionRequestMessage>,

    /* What sampling temperature to use, between 0 and 2. We generally recommend altering this or `top_p` but not both. */
    @Json(name = "temperature")
    val temperature: BigDecimal? = BigDecimal.ONE,

    /* An alternative to sampling with temperature (nucleus sampling): only the tokens comprising the `top_p` probability mass are considered. We generally recommend altering this or `temperature` but not both. */
    @Json(name = "top_p")
    val topP: BigDecimal? = BigDecimal.ONE,

    /* How many chat completion choices to generate for each input message. */
    @Json(name = "n")
    val n: Int? = 1,

    /* If set, partial message deltas will be sent, like in ChatGPT, as data-only server-sent events terminated by a `data: [DONE]` message. */
    @Json(name = "stream")
    val stream: Boolean? = false,

    /* Up to 4 sequences where the API will stop generating further tokens. */
    @Json(name = "stop")
    val stop: Array<String>? = null,

    /* The maximum number of tokens allowed for the generated answer. By default, the number of tokens the model can return will be (4096 - prompt tokens). */
    @Json(name = "max_tokens")
    val maxTokens: Int? = null,

    /* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. */
    @Json(name = "presence_penalty")
    val presencePenalty: BigDecimal? = BigDecimal.ZERO,

    /* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. */
    @Json(name = "frequency_penalty")
    val frequencyPenalty: BigDecimal? = BigDecimal.ZERO,

    /* Modify the likelihood of specified tokens appearing in the completion: a json object mapping token IDs to a bias value from -100 to 100 added to the logits prior to sampling. */
    @Json(name = "logit_bias")
    val logitBias: Any? = null,

    /* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). */
    @Json(name = "user")
    val user: String? = null,
)
89+
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
/**
2+
*
3+
* Please note:
4+
* This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
5+
* Do not edit this file manually.
6+
*
7+
*/
8+
9+
@file:Suppress(
10+
"ArrayInDataClass",
11+
"EnumEntryName",
12+
"RemoveRedundantQualifierName",
13+
"UnusedImport"
14+
)
15+
16+
package org.openapi.client.kotlin.openai.models
17+
18+
19+
import com.squareup.moshi.Json
20+
21+
/**
 * Up to 4 sequences where the API will stop generating further tokens.
 *
 * Wraps either a single stop sequence (a String) or several of them
 * (an Array of Strings).
 */
@Deprecated("This class is deprecated, please use Array<String> instead.")
data class CreateChatCompletionRequestStop (private val value: Any) {
    init {
        // The original check accepted any Array<*> (e.g. an Array of Ints),
        // contradicting the error message below; also validate element types.
        require(value is String || (value is Array<*> && value.all { it is String })) {
            "value must be initialized with a String or an Array of Strings"
        }
    }

    /** Returns the wrapped value: a String or an Array of Strings. */
    fun getValue(): Any {
        return value
    }
}

0 commit comments

Comments
 (0)