89 changes: 87 additions & 2 deletions docs/components/llms/models/lmstudio.mdx
@@ -35,13 +35,56 @@ messages = [
]
m.add(messages, user_id="alice", metadata={"category": "movies"})
```

```typescript TypeScript
import { Memory } from "mem0ai/oss";
import type { MemoryConfig } from "mem0ai/oss";

// Set your OpenAI API key for embeddings
process.env.OPENAI_API_KEY = "your-api-key";

const config: MemoryConfig = {
llm: {
provider: "lmstudio",
config: {
model: "llama-3.2-1b-instruct", // or any model loaded in LMStudio
baseUrl: "http://localhost:1234/v1", // default LM Studio API URL
},
},
embedder: {
provider: "openai", // Using OpenAI for embeddings
config: {
apiKey: process.env.OPENAI_API_KEY,
model: "text-embedding-3-small",
},
},
vectorStore: {
provider: "memory",
config: {
collectionName: "lmstudio-memories",
},
},
};

const memory = new Memory(config);

const messages = [
{ role: "user", content: "I'm planning to watch a movie tonight. Any recommendations?" },
{ role: "assistant", content: "How about a thriller movies? They can be quite engaging." },
{ role: "user", content: "I'm not a big fan of thriller movies but I love sci-fi movies." },
{ role: "assistant", content: "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future." },
];

await memory.add(messages, { userId: "alice", metadata: { category: "movies" } });
```
</CodeGroup>

### Running Completely Locally

You can also use LM Studio for both LLM and embedding to run Mem0 entirely locally:

```python
<CodeGroup>
```python Python
from mem0 import Memory

# No external API keys needed!
@@ -64,6 +107,47 @@ messages = [
m.add(messages, user_id="alice123", metadata={"category": "movies"})
```

```typescript TypeScript
import { Memory } from "mem0ai/oss";
import type { MemoryConfig } from "mem0ai/oss";

// No external API keys needed!
const config: MemoryConfig = {
llm: {
provider: "lmstudio",
config: {
model: "llama-3.2-1b-instruct", // your loaded LLM model
baseUrl: "http://localhost:1234/v1",
},
},
embedder: {
provider: "lmstudio", // Note: Make sure you have an embedding model loaded in LMStudio
config: {
baseUrl: "http://localhost:1234/v1",
model: "nomic-ai/nomic-embed-text-v1.5-GGUF", // example embedding model
},
},
vectorStore: {
provider: "memory",
config: {
collectionName: "local-memories",
},
},
};

const memory = new Memory(config);

const messages = [
{ role: "user", content: "I'm planning to watch a movie tonight. Any recommendations?" },
{ role: "assistant", content: "How about a thriller movies? They can be quite engaging." },
{ role: "user", content: "I'm not a big fan of thriller movies but I love sci-fi movies." },
{ role: "assistant", content: "Got it! I'll avoid thriller recommendations and suggest sci-fi movies in the future." },
];

await memory.add(messages, { userId: "alice123", metadata: { category: "movies" } });
```
</CodeGroup>
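
Retrieval against the locally stored memories works the same way as with a hosted setup. As a quick sketch, assuming the `memory` instance and `alice123` user from the example above:

```typescript
// Search the locally stored memories; relevance is computed with the
// embedding model loaded in LM Studio.
const results = await memory.search("What kind of movies does alice123 like?", {
  userId: "alice123",
});
console.log(results);
```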

<Note>
When using LM Studio for both LLM and embedding, make sure you have:
1. An LLM model loaded for generating responses
@@ -75,7 +159,8 @@ m.add(messages, user_id="alice123", metadata={"category": "movies"})
To use LM Studio, you need to:
1. Download and install [LM Studio](https://lmstudio.ai/)
2. Start a local server from the "Server" tab
3. Set the appropriate `lmstudio_base_url` in your configuration (default is usually http://localhost:1234/v1)
3. Set the appropriate `lmstudio_base_url` (Python) or `baseUrl` (TypeScript) in your configuration (the default is usually `http://localhost:1234/v1`); a quick check that the server is reachable is sketched after this note
4. For TypeScript: Install the LM Studio SDK as a peer dependency: `npm install @lmstudio/sdk`
</Note>
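
Before wiring LM Studio into Mem0, it can help to confirm that the local server is actually reachable. The snippet below is a minimal sketch, assuming the default server address; it queries the OpenAI-compatible `/models` endpoint that LM Studio's local server exposes:

```typescript
// Minimal connectivity check (assumes the default LM Studio server address).
const baseUrl = "http://localhost:1234/v1";

const res = await fetch(`${baseUrl}/models`);
if (!res.ok) {
  throw new Error(`LM Studio server not reachable at ${baseUrl}: ${res.status}`);
}

const { data } = await res.json();
console.log(
  "Models available on the server:",
  data.map((m: { id: string }) => m.id),
);
```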

## Config
3 changes: 2 additions & 1 deletion mem0-ts/package.json
@@ -84,7 +84,7 @@
"jest": "^29.7.0",
"nodemon": "^3.0.1",
"prettier": "^3.5.2",
"rimraf": "^5.0.5",
"rimraf": "^5.0.10",
"ts-jest": "^29.2.6",
"ts-node": "^10.9.2",
"tsup": "^8.3.0",
@@ -101,6 +101,7 @@
"@cloudflare/workers-types": "^4.20250504.0",
"@google/genai": "^1.2.0",
"@langchain/core": "^0.3.44",
"@lmstudio/sdk": "^1.0.0",
"@mistralai/mistralai": "^1.5.2",
"@qdrant/js-client-rest": "1.13.0",
"@supabase/supabase-js": "^2.49.1",
80 changes: 80 additions & 0 deletions mem0-ts/src/oss/examples/llms/lmstudio-example.ts
@@ -0,0 +1,80 @@
import { Memory } from "../../src/memory";
import { MemoryConfig } from "../../src/types";

async function main() {
// LMStudio configuration
const config: MemoryConfig = {
llm: {
provider: "lmstudio",
config: {
model: "llama-3.2-1b-instruct", // or any model you have loaded in LMStudio
baseUrl: "http://localhost:1234/v1", // default LMStudio server URL
},
},
embedder: {
provider: "openai", // You can use OpenAI embeddings or any other supported embedder
config: {
apiKey: process.env.OPENAI_API_KEY,
model: "text-embedding-3-small",
},
},
vectorStore: {
provider: "memory",
config: {
collectionName: "lmstudio-memories",
},
},
};

// Initialize Memory with LMStudio
const memory = new Memory(config);

const userId = "user-123";

try {
// Add some memories
console.log("Adding memories...");
await memory.add("I love playing guitar and listening to jazz music.", {
userId,
});
await memory.add("I work as a software engineer at a tech startup.", {
userId,
});
await memory.add("My favorite programming language is TypeScript.", {
userId,
});

// Search for memories
console.log("\nSearching for music-related memories:");
const musicMemories = await memory.search("music", { userId });
console.log(musicMemories);

console.log("\nSearching for work-related memories:");
const workMemories = await memory.search("work programming", { userId });
console.log(workMemories);

// Get all memories
console.log("\nAll memories:");
const allMemories = await memory.getAll({ userId });
console.log(allMemories);

// Update a memory
console.log("\nUpdating memory...");
await memory.add(
"I recently started learning to play piano alongside guitar.",
{ userId },
);

// Search again to see updated results
console.log("\nSearching for music after update:");
const updatedMusicMemories = await memory.search("music instruments", {
userId,
});
console.log(updatedMusicMemories);
} catch (error) {
console.error("Error:", error);
}
}

// Run the example
main().catch(console.error);
132 changes: 132 additions & 0 deletions mem0-ts/src/oss/src/llms/lmstudio.ts
@@ -0,0 +1,132 @@
import { LMStudioClient, Chat } from "@lmstudio/sdk";
import { LLM, LLMResponse } from "./base";
import { LLMConfig, Message } from "../types";
import { logger } from "../utils/logger";

export class LMStudioLLM implements LLM {
private client: LMStudioClient;
private model: string;
private modelHandle: any;
private initialized: boolean = false;

constructor(config: LLMConfig) {
this.client = new LMStudioClient({
baseUrl:
config.config?.baseUrl || config.baseURL || "http://localhost:1234/v1",
});
this.model = config.model || "llama-3.2-1b-instruct";
this.initializeModel();
}

private async initializeModel(): Promise<void> {
if (this.initialized) {
return;
}

try {
// Get the model handle from LMStudio client
this.modelHandle = await this.client.llm.model(this.model);
this.initialized = true;
logger.info(`LMStudio model ${this.model} initialized successfully`);
} catch (error) {
logger.error(`Error initializing LMStudio model ${this.model}: ${error}`);
throw error;
}
}

async generateResponse(
messages: Message[],
responseFormat?: { type: string },
tools?: any[],
): Promise<string | LLMResponse> {
await this.initializeModel();

try {
// Convert messages to LMStudio format
const lmStudioMessages = messages.map((msg) => ({
role: msg.role as "system" | "user" | "assistant",
content:
typeof msg.content === "string"
? msg.content
: JSON.stringify(msg.content),
}));

// Create chat context
const chat = Chat.from(lmStudioMessages);

// Configure prediction parameters
const predictionConfig: any = {};

if (responseFormat?.type === "json_object") {
predictionConfig.structured = true;
}

// Tools are not directly supported in the same way as OpenAI
// LMStudio may handle tool calls differently or not at all
if (tools) {
logger.warn(
"Tool calls may not be fully supported by LMStudio integration",
);
}

// Generate response
const prediction = this.modelHandle.respond(chat, predictionConfig);
let fullContent = "";

// Collect the streamed response
for await (const { content } of prediction) {
fullContent += content;
}

// For simple text responses
if (!tools) {
return fullContent;
}

// For tool calls (basic support)
return {
content: fullContent,
role: "assistant",
toolCalls: [], // LMStudio may not support tool calls in the same format
};
} catch (error) {
logger.error(`Error generating response with LMStudio: ${error}`);
throw error;
}
}

async generateChat(messages: Message[]): Promise<LLMResponse> {
await this.initializeModel();

try {
// Convert messages to LMStudio format
const lmStudioMessages = messages.map((msg) => ({
role: msg.role as "system" | "user" | "assistant",
content:
typeof msg.content === "string"
? msg.content
: JSON.stringify(msg.content),
}));

// Create chat context
const chat = Chat.from(lmStudioMessages);

// Generate response
const prediction = this.modelHandle.respond(chat);
let fullContent = "";

// Collect the streamed response
for await (const { content } of prediction) {
fullContent += content;
}

return {
content: fullContent,
role: "assistant",
};
} catch (error) {
logger.error(`Error generating chat response with LMStudio: ${error}`);
throw error;
}
}
}
3 changes: 3 additions & 0 deletions mem0-ts/src/oss/src/utils/factory.ts
@@ -31,6 +31,7 @@ import { AzureOpenAIEmbedder } from "../embeddings/azure";
import { LangchainLLM } from "../llms/langchain";
import { LangchainEmbedder } from "../embeddings/langchain";
import { LangchainVectorStore } from "../vector_stores/langchain";
import { LMStudioLLM } from "../llms/lmstudio";

export class EmbedderFactory {
static create(provider: string, config: EmbeddingConfig): Embedder {
@@ -74,6 +75,8 @@ export class LLMFactory {
return new MistralLLM(config);
case "langchain":
return new LangchainLLM(config);
case "lmstudio":
return new LMStudioLLM(config);
default:
throw new Error(`Unsupported LLM provider: ${provider}`);
}