diff --git a/README.md b/README.md
index c7e4a77..fc13bcf 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@ Add the following dependency to your `pom.xml`:
ai.z.openapi
zai-sdk
- 0.0.5
+ 0.0.6
```
@@ -39,7 +39,7 @@ Add the following dependency to your `build.gradle` (for Groovy DSL):
```groovy
dependencies {
- implementation 'ai.z.openapi:zai-sdk:0.0.5'
+ implementation 'ai.z.openapi:zai-sdk:0.0.6'
}
```
@@ -124,7 +124,7 @@ ZaiClient client = ZaiClient.builder()
// Create chat request
ChatCompletionCreateParams request = ChatCompletionCreateParams.builder()
- .model(Constants.ModelChatGLM4)
+ .model("glm-4.6")
.messages(Arrays.asList(
ChatMessage.builder()
.role(ChatMessageRole.USER.value())
@@ -152,7 +152,7 @@ if (response.isSuccess()) {
```java
// Create streaming request
ChatCompletionCreateParams streamRequest = ChatCompletionCreateParams.builder()
- .model(Constants.ModelChatGLM4)
+ .model("glm-4.6")
.messages(Arrays.asList(
ChatMessage.builder()
.role(ChatMessageRole.USER.value())
@@ -281,7 +281,7 @@ public class AIController {
@PostMapping("/chat")
public ResponseEntity chat(@RequestBody ChatRequest request) {
ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
- .model(Constants.ModelChatGLM4)
+ .model("glm-4.6")
.messages(Arrays.asList(
ChatMessage.builder()
.role(ChatMessageRole.USER.value())
diff --git a/README_CN.md b/README_CN.md
index 481941b..7def537 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -30,7 +30,7 @@ Z.ai AI 平台官方 Java SDK,提供统一接口访问强大的AI能力,包
ai.z.openapi
zai-sdk
- 0.0.5
+ 0.0.6
```
@@ -39,7 +39,7 @@ Z.ai AI 平台官方 Java SDK,提供统一接口访问强大的AI能力,包
```groovy
dependencies {
- implementation 'ai.z.openapi:zai-sdk:0.0.5'
+ implementation 'ai.z.openapi:zai-sdk:0.0.6'
}
```
@@ -123,7 +123,7 @@ ZaiClient client = ZaiClient.builder()
// 创建对话请求
ChatCompletionCreateParams request = ChatCompletionCreateParams.builder()
- .model(Constants.ModelChatGLM4)
+ .model("glm-4.6")
.messages(Arrays.asList(
ChatMessage.builder()
.role(ChatMessageRole.USER.value())
@@ -151,7 +151,7 @@ if (response.isSuccess()) {
```java
// 创建流式请求
ChatCompletionCreateParams streamRequest = ChatCompletionCreateParams.builder()
- .model(Constants.ModelChatGLM4)
+ .model("glm-4.6")
.messages(Arrays.asList(
ChatMessage.builder()
.role(ChatMessageRole.USER.value())
@@ -282,7 +282,7 @@ public class AIController {
@PostMapping("/chat")
public ResponseEntity chat(@RequestBody ChatRequest request) {
ChatCompletionCreateParams params = ChatCompletionCreateParams.builder()
- .model(Constants.ModelChatGLM4)
+ .model("glm-4.6")
.messages(Arrays.asList(
ChatMessage.builder()
.role(ChatMessageRole.USER.value())
diff --git a/core/src/main/java/ai/z/openapi/core/Constants.java b/core/src/main/java/ai/z/openapi/core/Constants.java
index ed8d492..12557b3 100644
--- a/core/src/main/java/ai/z/openapi/core/Constants.java
+++ b/core/src/main/java/ai/z/openapi/core/Constants.java
@@ -35,6 +35,21 @@ private Constants() {
// Text Generation Models
// =============================================================================
+ /**
+ * GLM-4.6 model code
+ */
+ public static final String ModelGLM4_6 = "glm-4.6";
+
+ /**
+ * GLM-4.6-air model code
+ */
+ public static final String ModelGLM4_6_AIR = "glm-4.6-air";
+
+ /**
+ * GLM-4.6-flash model code
+ */
+ public static final String ModelGLM4_6_FLASH = "glm-4.6-flash";
+
/**
* GLM-4.5 model code
*/
diff --git a/core/src/main/java/ai/z/openapi/core/token/HttpRequestInterceptor.java b/core/src/main/java/ai/z/openapi/core/token/HttpRequestInterceptor.java
index e87e0d9..3432a08 100644
--- a/core/src/main/java/ai/z/openapi/core/token/HttpRequestInterceptor.java
+++ b/core/src/main/java/ai/z/openapi/core/token/HttpRequestInterceptor.java
@@ -40,7 +40,7 @@ public Response intercept(Chain chain) throws IOException {
.newBuilder()
.header("Authorization", "Bearer " + accessToken)
.header("x-source-channel", source_channel)
- .header("Zai-SDK-Ver", "0.0.5")
+ .header("Zai-SDK-Ver", "0.0.6")
.header("Accept-Language", "en-US,en");
if (Objects.nonNull(config.getCustomHeaders())) {
for (Map.Entry entry : config.getCustomHeaders().entrySet()) {
diff --git a/core/src/main/java/ai/z/openapi/service/model/ChatCompletionCreateParams.java b/core/src/main/java/ai/z/openapi/service/model/ChatCompletionCreateParams.java
index adcd712..10d343c 100644
--- a/core/src/main/java/ai/z/openapi/service/model/ChatCompletionCreateParams.java
+++ b/core/src/main/java/ai/z/openapi/service/model/ChatCompletionCreateParams.java
@@ -42,21 +42,19 @@ public class ChatCompletionCreateParams extends CommonRequest implements ClientR
private Boolean stream;
/**
- * Sampling temperature, controls output randomness, must be positive Range:
- * (0.0,1.0], cannot equal 0, default value is 0.95 Higher values make output more
- * random and creative; lower values make output more stable or deterministic It's
- * recommended to adjust either top_p or temperature parameter based on your use case,
- * but not both simultaneously
+ * Sampling temperature, controls output randomness; must be non-negative. Range: [0.0, 1.0],
+ * default value is 0.95. Higher values make output more random and creative; lower
+ * values make output more stable or deterministic. It's recommended to adjust either
+ * the top_p or the temperature parameter based on your use case, but not both simultaneously.
*/
private Float temperature;
/**
- * Another method for temperature sampling, called nucleus sampling Range: (0.0, 1.0)
- * open interval, cannot equal 0 or 1, default value is 0.7 Model considers results
- * with top_p probability mass tokens For example: 0.1 means the model decoder only
- * considers tokens from the top 10% probability candidate set It's recommended to
- * adjust either top_p or temperature parameter based on your use case, but not both
- * simultaneously
+ * Another method for temperature sampling, called nucleus sampling. Range: (0.0, 1.0].
+ * The model considers the tokens within the top_p probability mass. For example:
+ * 0.1 means the model decoder only considers tokens from the top 10% probability
+ * candidate set. It's recommended to adjust either the top_p or the temperature
+ * parameter based on your use case, but not both simultaneously.
*/
@JsonProperty("top_p")
private Float topP;
@@ -118,6 +116,11 @@ public class ChatCompletionCreateParams extends CommonRequest implements ClientR
*/
private ChatThinking thinking;
+ /**
+ * Whether to stream tool calls
+ */
+ private Boolean tool_stream;
+
/**
* Forced watermark switch
*/
diff --git a/pom.xml b/pom.xml
index 3324568..3e46f03 100644
--- a/pom.xml
+++ b/pom.xml
@@ -45,7 +45,7 @@
- 0.0.5.1
+ 0.0.6
8
UTF-8
UTF-8
diff --git a/samples/src/main/ai.z.openapi.samples/ChatAsyncCompletionExample.java b/samples/src/main/ai.z.openapi.samples/ChatAsyncCompletionExample.java
index 6b2ff83..2af9aef 100644
--- a/samples/src/main/ai.z.openapi.samples/ChatAsyncCompletionExample.java
+++ b/samples/src/main/ai.z.openapi.samples/ChatAsyncCompletionExample.java
@@ -30,7 +30,7 @@ public static void main(String[] args) {
// Create chat request
ChatCompletionCreateParams request = ChatCompletionCreateParams.builder()
- .model(Constants.ModelChatGLM4_5)
+ .model("glm-4.6")
.messages(Arrays.asList(
ChatMessage.builder()
.role(ChatMessageRole.USER.value())
diff --git a/samples/src/main/ai.z.openapi.samples/ChatCompletionExample.java b/samples/src/main/ai.z.openapi.samples/ChatCompletionExample.java
index 127b6f2..423c93f 100644
--- a/samples/src/main/ai.z.openapi.samples/ChatCompletionExample.java
+++ b/samples/src/main/ai.z.openapi.samples/ChatCompletionExample.java
@@ -25,7 +25,7 @@ public static void main(String[] args) {
// Create chat request
ChatCompletionCreateParams request = ChatCompletionCreateParams.builder()
- .model(Constants.ModelChatGLM4_5)
+ .model("glm-4.6")
.messages(Arrays.asList(
ChatMessage.builder()
.role(ChatMessageRole.USER.value())
diff --git a/samples/src/main/ai.z.openapi.samples/ChatCompletionStreamExample.java b/samples/src/main/ai.z.openapi.samples/ChatCompletionStreamExample.java
index e573a48..afa5352 100644
--- a/samples/src/main/ai.z.openapi.samples/ChatCompletionStreamExample.java
+++ b/samples/src/main/ai.z.openapi.samples/ChatCompletionStreamExample.java
@@ -18,7 +18,7 @@ public static void main(String[] args) {
// Create chat request
ChatCompletionCreateParams streamRequest = ChatCompletionCreateParams.builder()
- .model(Constants.ModelChatGLM4_5)
+ .model("glm-4.6")
.messages(Arrays.asList(
ChatMessage.builder()
.role(ChatMessageRole.USER.value())
diff --git a/samples/src/main/ai.z.openapi.samples/ChatCompletionWithCustomHeadersExample.java b/samples/src/main/ai.z.openapi.samples/ChatCompletionWithCustomHeadersExample.java
index dc3f917..ff51652 100644
--- a/samples/src/main/ai.z.openapi.samples/ChatCompletionWithCustomHeadersExample.java
+++ b/samples/src/main/ai.z.openapi.samples/ChatCompletionWithCustomHeadersExample.java
@@ -22,7 +22,7 @@ public static void main(String[] args) {
// Create chat request
ChatCompletionCreateParams request = ChatCompletionCreateParams.builder()
- .model(Constants.ModelChatGLM4_5)
+ .model("glm-4.6")
.messages(Arrays.asList(
ChatMessage.builder()
.role(ChatMessageRole.USER.value())