Skip to content

Commit 378d3ec

Browse files
authored
Chore/Remove Redundant Check for Image Processing (#5980)
Removes the redundant check for the vision model during image processing.
1 parent 0f3b3d6 commit 378d3ec

File tree

20 files changed

+8
-208
lines changed

20 files changed

+8
-208
lines changed

packages/components/nodes/agents/ConversationalAgent/ConversationalAgent.ts

Lines changed: 1 addition & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -10,16 +10,7 @@ import { RunnableSequence } from '@langchain/core/runnables'
1010
import { ChatConversationalAgent } from '@langchain/classic/agents'
1111
import { getBaseClasses, transformBracesWithColon } from '../../../src/utils'
1212
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
13-
import {
14-
IVisionChatModal,
15-
FlowiseMemory,
16-
ICommonObject,
17-
INode,
18-
INodeData,
19-
INodeParams,
20-
IUsedTool,
21-
IServerSideEventStreamer
22-
} from '../../../src/Interface'
13+
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool, IServerSideEventStreamer } from '../../../src/Interface'
2314
import { AgentExecutor } from '../../../src/agents'
2415
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
2516
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
@@ -241,12 +232,9 @@ const prepareAgent = async (
241232
})
242233

243234
if (llmSupportsVision(model)) {
244-
const visionChatModel = model as IVisionChatModal
245235
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
246236

247237
if (messageContent?.length) {
248-
visionChatModel.setVisionModel()
249-
250238
// Pop the `agent_scratchpad` MessagePlaceHolder
251239
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
252240
if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
@@ -264,8 +252,6 @@ const prepareAgent = async (
264252

265253
// Add the `agent_scratchpad` MessagePlaceHolder back
266254
prompt.promptMessages.push(messagePlaceholder)
267-
} else {
268-
visionChatModel.revertToOriginalModel()
269255
}
270256
}
271257

packages/components/nodes/agents/ConversationalRetrievalToolAgent/ConversationalRetrievalToolAgent.ts

Lines changed: 1 addition & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -13,16 +13,7 @@ import {
1313
createTextOnlyOutputParser
1414
} from '../../../src/utils'
1515
import { type ToolsAgentStep } from '@langchain/classic/agents/openai/output_parser'
16-
import {
17-
FlowiseMemory,
18-
ICommonObject,
19-
INode,
20-
INodeData,
21-
INodeParams,
22-
IServerSideEventStreamer,
23-
IUsedTool,
24-
IVisionChatModal
25-
} from '../../../src/Interface'
16+
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IServerSideEventStreamer, IUsedTool } from '../../../src/Interface'
2617
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
2718
import { AgentExecutor, ToolCallingAgentOutputParser } from '../../../src/agents'
2819
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
@@ -279,12 +270,9 @@ const prepareAgent = async (
279270
])
280271

281272
if (llmSupportsVision(model)) {
282-
const visionChatModel = model as IVisionChatModal
283273
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
284274

285275
if (messageContent?.length) {
286-
visionChatModel.setVisionModel()
287-
288276
// Pop the `agent_scratchpad` MessagePlaceHolder
289277
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
290278
if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
@@ -302,8 +290,6 @@ const prepareAgent = async (
302290

303291
// Add the `agent_scratchpad` MessagePlaceHolder back
304292
prompt.promptMessages.push(messagePlaceholder)
305-
} else {
306-
visionChatModel.revertToOriginalModel()
307293
}
308294
}
309295

packages/components/nodes/agents/ReActAgentChat/ReActAgentChat.ts

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@ import type { PromptTemplate } from '@langchain/core/prompts'
66
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
77
import { pull } from 'langchain/hub'
88
import { additionalCallbacks } from '../../../src/handler'
9-
import { IVisionChatModal, FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
9+
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
1010
import { getBaseClasses } from '../../../src/utils'
1111
import { createReactAgent } from '../../../src/agents'
1212
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
@@ -105,14 +105,10 @@ class ReActAgentChat_Agents implements INode {
105105
let chatPromptTemplate = undefined
106106

107107
if (llmSupportsVision(model)) {
108-
const visionChatModel = model as IVisionChatModal
109108
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
110109

111110
if (messageContent?.length) {
112-
// Change model to vision supported
113-
visionChatModel.setVisionModel()
114111
const oldTemplate = prompt.template as string
115-
116112
const msg = HumanMessagePromptTemplate.fromTemplate([
117113
...messageContent,
118114
{
@@ -121,9 +117,6 @@ class ReActAgentChat_Agents implements INode {
121117
])
122118
msg.inputVariables = prompt.inputVariables
123119
chatPromptTemplate = ChatPromptTemplate.fromMessages([msg])
124-
} else {
125-
// revert to previous values if image upload is empty
126-
visionChatModel.revertToOriginalModel()
127120
}
128121
}
129122

packages/components/nodes/agents/ToolAgent/ToolAgent.ts

Lines changed: 1 addition & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -14,16 +14,7 @@ import {
1414
removeInvalidImageMarkdown,
1515
transformBracesWithColon
1616
} from '../../../src/utils'
17-
import {
18-
FlowiseMemory,
19-
ICommonObject,
20-
INode,
21-
INodeData,
22-
INodeParams,
23-
IServerSideEventStreamer,
24-
IUsedTool,
25-
IVisionChatModal
26-
} from '../../../src/Interface'
17+
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IServerSideEventStreamer, IUsedTool } from '../../../src/Interface'
2718
import { ConsoleCallbackHandler, CustomChainHandler, CustomStreamingHandler, additionalCallbacks } from '../../../src/handler'
2819
import { AgentExecutor, ToolCallingAgentOutputParser } from '../../../src/agents'
2920
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
@@ -315,12 +306,9 @@ const prepareAgent = async (
315306
}
316307

317308
if (llmSupportsVision(model)) {
318-
const visionChatModel = model as IVisionChatModal
319309
const messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
320310

321311
if (messageContent?.length) {
322-
visionChatModel.setVisionModel()
323-
324312
// Pop the `agent_scratchpad` MessagePlaceHolder
325313
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
326314
if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
@@ -338,8 +326,6 @@ const prepareAgent = async (
338326

339327
// Add the `agent_scratchpad` MessagePlaceHolder back
340328
prompt.promptMessages.push(messagePlaceholder)
341-
} else {
342-
visionChatModel.revertToOriginalModel()
343329
}
344330
}
345331

packages/components/nodes/chains/ConversationChain/ConversationChain.ts

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,6 @@ import { formatResponse } from '../../outputparsers/OutputParserHelpers'
1616
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
1717
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
1818
import {
19-
IVisionChatModal,
2019
FlowiseMemory,
2120
ICommonObject,
2221
INode,
@@ -232,13 +231,6 @@ const prepareChain = async (nodeData: INodeData, options: ICommonObject, session
232231
let messageContent: MessageContentImageUrl[] = []
233232
if (llmSupportsVision(model)) {
234233
messageContent = await addImagesToMessages(nodeData, options, model.multiModalOption)
235-
const visionChatModel = model as IVisionChatModal
236-
if (messageContent?.length) {
237-
visionChatModel.setVisionModel()
238-
} else {
239-
// revert to previous values if image upload is empty
240-
visionChatModel.revertToOriginalModel()
241-
}
242234
}
243235

244236
const chatPrompt = prepareChatPrompt(nodeData, messageContent)

packages/components/nodes/chains/LLMChain/LLMChain.ts

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -201,8 +201,6 @@ const runPrediction = async (
201201
const visionChatModel = chain.llm as IVisionChatModal
202202
const messageContent = await addImagesToMessages(nodeData, options, visionChatModel.multiModalOption)
203203
if (messageContent?.length) {
204-
// Change model to gpt-4-vision && max token to higher when using gpt-4-vision
205-
visionChatModel.setVisionModel()
206204
// Add image to the message
207205
if (chain.prompt instanceof PromptTemplate) {
208206
const existingPromptTemplate = chain.prompt.template as string
@@ -238,9 +236,6 @@ const runPrediction = async (
238236
// @ts-ignore
239237
chain.prompt.examplePrompt = newFewShotPromptTemplate
240238
}
241-
} else {
242-
// revert to previous values if image upload is empty
243-
visionChatModel.revertToOriginalModel()
244239
}
245240
}
246241

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,6 @@
11
import { IVisionChatModal, IMultiModalOption } from '../../../src'
22
import { ChatBedrockConverse as LCBedrockChat, ChatBedrockConverseInput } from '@langchain/aws'
33

4-
const DEFAULT_IMAGE_MODEL = 'anthropic.claude-3-haiku-20240307-v1:0'
5-
const DEFAULT_IMAGE_MAX_TOKEN = 1024
6-
74
export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
85
configuredModel: string
96
configuredMaxToken?: number
@@ -17,19 +14,7 @@ export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
1714
this.configuredMaxToken = fields?.maxTokens
1815
}
1916

20-
revertToOriginalModel(): void {
21-
this.model = this.configuredModel
22-
this.maxTokens = this.configuredMaxToken
23-
}
24-
2517
setMultiModalOption(multiModalOption: IMultiModalOption): void {
2618
this.multiModalOption = multiModalOption
2719
}
28-
29-
setVisionModel(): void {
30-
if (!this.model.includes('claude-3')) {
31-
this.model = DEFAULT_IMAGE_MODEL
32-
this.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : DEFAULT_IMAGE_MAX_TOKEN
33-
}
34-
}
3520
}

packages/components/nodes/chatmodels/AzureChatOpenAI/FlowiseAzureChatOpenAI.ts

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -17,19 +17,10 @@ export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVision
1717
this.configuredMaxToken = fields?.maxTokens
1818
}
1919

20-
revertToOriginalModel(): void {
21-
this.model = this.configuredModel
22-
this.maxTokens = this.configuredMaxToken
23-
}
24-
2520
setMultiModalOption(multiModalOption: IMultiModalOption): void {
2621
this.multiModalOption = multiModalOption
2722
}
2823

29-
setVisionModel(): void {
30-
// pass
31-
}
32-
3324
addBuiltInTools(builtInTool: Record<string, any>): void {
3425
this.builtInTools.push(builtInTool)
3526
}

packages/components/nodes/chatmodels/ChatAnthropic/FlowiseChatAnthropic.ts

Lines changed: 0 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,6 @@ import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langch
22
import { type BaseChatModelParams } from '@langchain/core/language_models/chat_models'
33
import { IVisionChatModal, IMultiModalOption } from '../../../src'
44

5-
const DEFAULT_IMAGE_MODEL = 'claude-3-5-haiku-latest'
6-
const DEFAULT_IMAGE_MAX_TOKEN = 2048
7-
85
export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChatModal {
96
configuredModel: string
107
configuredMaxToken: number
@@ -19,19 +16,7 @@ export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChat
1916
this.configuredMaxToken = fields?.maxTokens ?? 2048
2017
}
2118

22-
revertToOriginalModel(): void {
23-
this.modelName = this.configuredModel
24-
this.maxTokens = this.configuredMaxToken
25-
}
26-
2719
setMultiModalOption(multiModalOption: IMultiModalOption): void {
2820
this.multiModalOption = multiModalOption
2921
}
30-
31-
setVisionModel(): void {
32-
if (!this.modelName.startsWith('claude-3')) {
33-
this.modelName = DEFAULT_IMAGE_MODEL
34-
this.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : DEFAULT_IMAGE_MAX_TOKEN
35-
}
36-
}
3722
}

packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI.ts

Lines changed: 0 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -930,16 +930,7 @@ export class ChatGoogleGenerativeAI extends LangchainChatGoogleGenerativeAI impl
930930
}
931931
}
932932

933-
revertToOriginalModel(): void {
934-
this.model = this.configuredModel
935-
this.maxOutputTokens = this.configuredMaxToken
936-
}
937-
938933
setMultiModalOption(multiModalOption: IMultiModalOption): void {
939934
this.multiModalOption = multiModalOption
940935
}
941-
942-
setVisionModel(): void {
943-
// pass
944-
}
945936
}

0 commit comments

Comments (0)