This repository was archived by the owner on Feb 11, 2025. It is now read-only.

Commit 76b7ffa

Fix unit tests, trigger completion callback when connection is closed, avoid NPE in client timeout
1 parent 4af45de commit 76b7ffa

File tree

6 files changed: +38, -78 lines

src/main/java/dev/ai4j/openai4j/StreamingRequestExecutor.java

Lines changed: 5 additions & 2 deletions

@@ -167,7 +167,6 @@ public void onEvent(EventSource eventSource, String id, String type, String data
         }
 
         if ("[DONE]".equals(data)) {
-            streamingCompletionCallback.run();
             return;
         }
 
@@ -192,6 +191,8 @@ public void onClosed(EventSource eventSource) {
         if (logStreamingResponses) {
            log.debug("onClosed()");
         }
+
+        streamingCompletionCallback.run();
     }
 
     @Override
@@ -208,7 +209,9 @@ public void onFailure(EventSource eventSource, Throwable t, okhttp3.Response res
 
         if (logStreamingResponses) {
            log.debug("onFailure()", t);
-           responseLogger.log(response);
+           if (response != null) {
+               responseLogger.log(response);
+           }
         }
 
         if (t != null) {
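Taken together, this change moves the completion signal from the "[DONE]" event to onClosed() (so it also fires when the server closes the connection without a sentinel), and makes onFailure() tolerate the null Response that OkHttp passes on a client-side timeout. A minimal sketch of that listener shape, assuming okhttp-sse's EventSourceListener; the class, constructor, and logging here are illustrative, not the library's exact code:

import okhttp3.Response;
import okhttp3.sse.EventSource;
import okhttp3.sse.EventSourceListener;

// Minimal sketch, assuming okhttp-sse; names beyond the callbacks shown in the diff are illustrative.
class StreamingListenerSketch extends EventSourceListener {

    private final Runnable streamingCompletionCallback;

    StreamingListenerSketch(Runnable streamingCompletionCallback) {
        this.streamingCompletionCallback = streamingCompletionCallback;
    }

    @Override
    public void onEvent(EventSource eventSource, String id, String type, String data) {
        if ("[DONE]".equals(data)) {
            // No completion callback here any more; onClosed() below handles it.
            return;
        }
        // ... parse the chunk and forward the partial response ...
    }

    @Override
    public void onClosed(EventSource eventSource) {
        // Completion is signalled when the connection closes, which also covers
        // streams that end without a "[DONE]" sentinel.
        streamingCompletionCallback.run();
    }

    @Override
    public void onFailure(EventSource eventSource, Throwable t, Response response) {
        // On a client-side timeout OkHttp invokes onFailure with response == null,
        // so guard before logging it to avoid a NullPointerException.
        if (response != null) {
            System.err.println("failed response: " + response);
        }
        if (t != null) {
            // ... propagate t to the error callback ...
        }
    }
}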

src/main/java/dev/ai4j/openai4j/chat/ChatCompletionModel.java

Lines changed: 0 additions & 1 deletion

@@ -7,7 +7,6 @@ public enum ChatCompletionModel {
     GPT_3_5_TURBO_0125("gpt-3.5-turbo-0125"),
 
     GPT_4("gpt-4"), // alias
-    GPT_4_0314("gpt-4-0314"),
     GPT_4_0613("gpt-4-0613"),
 
     GPT_4_TURBO("gpt-4-turbo"), // alias

src/test/java/dev/ai4j/openai4j/chat/ChatCompletionAsyncTest.java

Lines changed: 0 additions & 4 deletions

@@ -96,7 +96,6 @@ void testCustomizableApi(ChatCompletionModel model) throws Exception {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW" // Does not support many things now, including tools
     })
     void testTools(ChatCompletionModel model) throws Exception {
@@ -161,7 +160,6 @@ void testTools(ChatCompletionModel model) throws Exception {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW" // Does not support many things now, including functions
     })
     void testFunctions(ChatCompletionModel model) throws Exception {
@@ -220,7 +218,6 @@ void testFunctions(ChatCompletionModel model) throws Exception {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW" // does not support many things now, including tools
     })
     void testToolChoice(ChatCompletionModel model) throws Exception {
@@ -285,7 +282,6 @@ void testToolChoice(ChatCompletionModel model) throws Exception {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW"
     })
     void testFunctionChoice(ChatCompletionModel model) throws Exception {

src/test/java/dev/ai4j/openai4j/chat/ChatCompletionStreamingTest.java

Lines changed: 19 additions & 35 deletions

@@ -6,10 +6,13 @@
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.EnumSource;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 import static dev.ai4j.openai4j.chat.ChatCompletionModel.GPT_4O;
 import static dev.ai4j.openai4j.chat.ChatCompletionTest.*;
@@ -22,11 +25,14 @@
 import static java.util.concurrent.Executors.newSingleThreadExecutor;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.junit.jupiter.params.provider.EnumSource.Mode.EXCLUDE;
 import static org.junit.jupiter.params.provider.EnumSource.Mode.INCLUDE;
 
 class ChatCompletionStreamingTest extends RateLimitAwareTest {
 
+    private static final Logger log = LoggerFactory.getLogger(ChatCompletionStreamingTest.class);
+
     private final OpenAiClient client = OpenAiClient.builder()
             .baseUrl(System.getenv("OPENAI_BASE_URL"))
             .openAiApiKey(System.getenv("OPENAI_API_KEY"))
@@ -106,7 +112,6 @@ void testCustomizableApi(ChatCompletionModel model) throws Exception {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW" // Does not support many things now, including logit_bias and response_format
     })
     void testTools(ChatCompletionModel model) throws Exception {
@@ -224,7 +229,6 @@ void testTools(ChatCompletionModel model) throws Exception {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW" // Does not support many things now, including logit_bias and response_format
     })
     void testFunctions(ChatCompletionModel model) throws Exception {
@@ -322,7 +326,6 @@ void testFunctions(ChatCompletionModel model) throws Exception {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_TURBO_PREVIEW", // keeps returning "felsius" as temp unit
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW" // Does not support many things now, including logit_bias and response_format
     })
     void testToolChoice(ChatCompletionModel model) throws Exception {
@@ -440,7 +443,6 @@ void testToolChoice(ChatCompletionModel model) throws Exception {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW"
     })
     void testFunctionChoice(ChatCompletionModel model) throws Exception {
@@ -769,45 +771,27 @@ void testCancelStreamingAfterStreamingStarted() throws Exception {
                 .logStreamingResponses()
                 .build();
 
-        AtomicBoolean streamingStarted = new AtomicBoolean(false);
-        AtomicBoolean streamingCancelled = new AtomicBoolean(false);
-        AtomicBoolean cancellationSucceeded = new AtomicBoolean(true);
+        final AtomicBoolean streamingCancelled = new AtomicBoolean(false);
+        final AtomicReference<ResponseHandle> atomicReference = new AtomicReference<>();
+        final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
 
         ResponseHandle responseHandle = client.chatCompletion("Write a poem about AI in 10 words")
                 .onPartialResponse(partialResponse -> {
-                    streamingStarted.set(true);
-                    System.out.println("[[streaming started]]");
-                    if (streamingCancelled.get()) {
-                        cancellationSucceeded.set(false);
-                        System.out.println("[[cancellation failed]]");
+                    if (! streamingCancelled.getAndSet(true)) {
+                        log.info("Executor thread {}", Thread.currentThread());
+                        atomicReference.get().cancel();
+                        completableFuture.complete(null);
                     }
                 })
-                .onComplete(() -> {
-                    cancellationSucceeded.set(false);
-                    System.out.println("[[cancellation failed]]");
-                })
-                .onError(e -> {
-                    cancellationSucceeded.set(false);
-                    System.out.println("[[cancellation failed]]");
-                })
+                .onComplete(() -> fail("Response completed"))
+                .onError(e -> fail("Response errored"))
                 .execute();
 
-        while (!streamingStarted.get()) {
-            Thread.sleep(10);
-        }
+        log.info("Test thread {}", Thread.currentThread());
+        atomicReference.set(responseHandle);
+        completableFuture.get();
 
-        newSingleThreadExecutor().execute(() -> {
-            responseHandle.cancel();
-            streamingCancelled.set(true);
-            System.out.println("[[streaming cancelled]]");
-        });
-
-        while (!streamingCancelled.get()) {
-            Thread.sleep(10);
-        }
-        Thread.sleep(2000);
-
-        assertThat(cancellationSucceeded).isTrue();
+        assertThat(streamingCancelled).isTrue();
     }
 
     @Test
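The rewritten cancellation test drops the sleep-and-poll loops: the first partial response cancels the stream exactly once (getAndSet guards against repeated cancels), the ResponseHandle is handed to the callback through an AtomicReference because it only exists after execute() returns, and a CompletableFuture blocks the test thread until cancellation has actually happened, while onComplete/onError simply fail() because neither should fire after a successful cancel. A standalone sketch of that idiom, with a scheduled executor standing in for the streaming client; everything beyond the java.util.concurrent types is made up for illustration:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

// Self-contained demo of the cancel-on-first-chunk idiom; the "stream" here is faked.
public class CancelOnFirstChunkDemo {

    interface Handle { void cancel(); }   // stand-in for ResponseHandle

    public static void main(String[] args) throws Exception {
        AtomicBoolean cancelled = new AtomicBoolean(false);
        AtomicReference<Handle> handleRef = new AtomicReference<>();
        CompletableFuture<Void> done = new CompletableFuture<>();

        ScheduledExecutorService stream = Executors.newSingleThreadScheduledExecutor();
        // In the real test the handle is published only after execute() returns,
        // which is why the callback reads it through an AtomicReference.
        handleRef.set(stream::shutdownNow);

        // Emit "partial responses" every 100 ms, like a streaming completion.
        stream.scheduleAtFixedRate(() -> {
            if (!cancelled.getAndSet(true)) { // react to the first chunk only
                handleRef.get().cancel();     // cancel via the published handle
                done.complete(null);          // unblock the waiting test thread
            }
        }, 100, 100, TimeUnit.MILLISECONDS);

        done.get();                           // no Thread.sleep polling needed
        System.out.println("cancelled = " + cancelled.get());
    }
}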

src/test/java/dev/ai4j/openai4j/chat/ChatCompletionTest.java

Lines changed: 0 additions & 4 deletions

@@ -91,7 +91,6 @@ void testCustomizableApi(ChatCompletionModel model) {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW" // Does not support many things now, including logit_bias and response_format
     })
     void testTools(ChatCompletionModel model) {
@@ -149,7 +148,6 @@ void testTools(ChatCompletionModel model) {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW" // Does not support many things now, including logit_bias and response_format
     })
     void testFunctions(ChatCompletionModel model) {
@@ -201,7 +199,6 @@ void testFunctions(ChatCompletionModel model) {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW" // Does not support many things now, including logit_bias and response_format
     })
     void testToolChoice(ChatCompletionModel model) {
@@ -259,7 +256,6 @@ void testToolChoice(ChatCompletionModel model) {
     @EnumSource(value = ChatCompletionModel.class, mode = EXCLUDE, names = {
             "GPT_3_5_TURBO_0125", // don't have access to it yet
             "GPT_4_32K", "GPT_4_32K_0314", "GPT_4_32K_0613", // I don't have access to these models
-            "GPT_4_0314", // Does not support tools/functions
             "GPT_4_VISION_PREVIEW" // Does not support many things now, including logit_bias and response_format
     })
     void testFunctionChoice(ChatCompletionModel model) {

src/test/java/dev/ai4j/openai4j/completion/CompletionStreamingTest.java

Lines changed: 14 additions & 32 deletions

@@ -7,10 +7,12 @@
 
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 import static java.util.concurrent.Executors.newSingleThreadExecutor;
 import static java.util.concurrent.TimeUnit.SECONDS;
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.fail;
 
 class CompletionStreamingTest extends RateLimitAwareTest {
 
@@ -65,7 +67,7 @@ void testCustomizableApi() throws Exception {
     }
 
     @Test
-    void testCancelStreamingAfterStreamingStarted() throws InterruptedException {
+    void testCancelStreamingAfterStreamingStarted() throws Exception {
 
         OpenAiClient client = OpenAiClient.builder()
                 // without caching
@@ -75,45 +77,25 @@ void testCancelStreamingAfterStreamingStarted() throws InterruptedException {
                 .logStreamingResponses()
                 .build();
 
-        AtomicBoolean streamingStarted = new AtomicBoolean(false);
-        AtomicBoolean streamingCancelled = new AtomicBoolean(false);
-        AtomicBoolean cancellationSucceeded = new AtomicBoolean(true);
+        final AtomicBoolean streamingCancelled = new AtomicBoolean(false);
+        final AtomicReference<ResponseHandle> atomicReference = new AtomicReference<>();
+        final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
 
         ResponseHandle responseHandle = client.completion("Write a poem about AI in 10 words")
                 .onPartialResponse(partialResponse -> {
-                    streamingStarted.set(true);
-                    System.out.println("[[streaming started]]");
-                    if (streamingCancelled.get()) {
-                        cancellationSucceeded.set(false);
-                        System.out.println("[[cancellation failed]]");
+                    if (! streamingCancelled.getAndSet(true)) {
+                        atomicReference.get().cancel();
+                        completableFuture.complete(null);
                     }
                 })
-                .onComplete(() -> {
-                    cancellationSucceeded.set(false);
-                    System.out.println("[[cancellation failed]]");
-                })
-                .onError(e -> {
-                    cancellationSucceeded.set(false);
-                    System.out.println("[[cancellation failed]]");
-                })
+                .onComplete(() -> fail("Response completed"))
+                .onError(e -> fail("Response errored"))
                 .execute();
 
-        while (!streamingStarted.get()) {
-            Thread.sleep(10);
-        }
-
-        newSingleThreadExecutor().execute(() -> {
-            responseHandle.cancel();
-            streamingCancelled.set(true);
-            System.out.println("[[streaming cancelled]]");
-        });
-
-        while (!streamingCancelled.get()) {
-            Thread.sleep(10);
-        }
-        Thread.sleep(2000);
+        atomicReference.set(responseHandle);
+        completableFuture.get();
 
-        assertThat(cancellationSucceeded).isTrue();
+        assertThat(streamingCancelled).isTrue();
     }
 
     @Test
