
Commit feb6748

Merge pull request #42 from microsoft/0.1.3.6
Update to v0.1.3.6
2 parents 62a2747 + 3c3478f

File tree

6 files changed: +65 −20 lines changed

README.md

Lines changed: 28 additions & 6 deletions
@@ -274,17 +274,38 @@ with TraceGraph coming soon).
 ## LLM API Setup
 
 Currently we rely on [LiteLLM](https://github.com/BerriAI/litellm) or [AutoGen v0.2](https://github.com/microsoft/autogen/tree/0.2) for LLM caching and API-Key management.
-By default, LiteLLM is used. To use it, set the keys as the environment variables, e.g.
+
+By default, LiteLLM is used. To change the default backend, set the environment variable `TRACE_DEFAULT_LLM_BACKEND` on terminal
+```bash
+export TRACE_DEFAULT_LLM_BACKEND="<your LLM backend here>" # 'LiteLLM' or 'AutoGen'
+```
+or in python before importing `opto`
+```python
+import os
+os.environ["TRACE_DEFAULT_LLM_BACKEND"] = "<your LLM backend here>" # 'LiteLLM' or 'AutoGen'
+import opto
+```
+
+
+
+### Using LiteLLM as Backend
+
+Set the keys as the environment variables, following the [documentation of LiteLLM](https://docs.litellm.ai/docs/providers). For example,
 
 ```python
 import os
-os.environ["OPENAI_API_KEY"] = "your-openai-key"
-os.environ["ANTHROPIC_API_KEY"] = "your-anthropic-key"
+os.environ["OPENAI_API_KEY"] = "<your OpenAI API key here>"
+os.environ["ANTHROPIC_API_KEY"] = "<your Anthropic API key here>"
+```
+In Trace, we add another environment variable `TRACE_LITELLM_MODEL` to set the default model name used by LiteLLM for convenience, e.g.,
+```bash
+export TRACE_LITELLM_MODEL='gpt-4o'
 ```
+will set all LLM instances in Trace to use `gpt-4o` by default.
 
-Please see the [documentation of LiteLLM](https://docs.litellm.ai/docs/providers) for more details on setting keys and end-point urls.
 
-On the other hand, to use AutoGen, install Trace with autogen flag, `pip install trace-opt[autogen]`. AutoGen relies on `OAI_CONFIG_LIST`, which is a file you put in your working directory. It has the format of:
+### Using AutoGen as Backend
+First install Trace with autogen flag, `pip install trace-opt[autogen]`. AutoGen relies on `OAI_CONFIG_LIST`, which is a file you put in your working directory. It has the format of:
 
 ```json lines
 [
@@ -298,7 +319,8 @@ On the other hand, to use AutoGen, install Trace with autogen flag, `pip install
 }
 ]
 ```
-You switch between different LLM models by changing the `model` field in this configuration file.
+You can switch between different LLM models by changing the `model` field in this configuration file.
+Note AutoGen by default will use the first model available in this config file.
 
 You can also set an `os.environ` variable `OAI_CONFIG_LIST` to point to the location of this file or directly set a JSON string as the value of this variable.

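Taken together, the new README instructions amount to the following setup flow. A minimal sketch, assuming the LiteLLM backend with an OpenAI key; `LLM` is the synonym defined in `opto/utils/llm.py` (see the diff below), and the placeholder key is of course an assumption:

```python
import os

# The backend must be chosen before importing opto, because
# opto/utils/llm.py reads TRACE_DEFAULT_LLM_BACKEND at import time.
os.environ["TRACE_DEFAULT_LLM_BACKEND"] = "LiteLLM"
os.environ["TRACE_LITELLM_MODEL"] = "gpt-4o"  # default model for all LLM instances
os.environ["OPENAI_API_KEY"] = "<your OpenAI API key here>"

from opto.utils.llm import LLM  # resolves to LiteLLM given the backend above

llm = LLM()  # picks up gpt-4o from TRACE_LITELLM_MODEL
```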
opto/trace/iterators.py

Lines changed: 2 additions & 2 deletions
@@ -4,14 +4,14 @@
 from opto.trace.bundle import bundle
 import opto.trace.operators as ops
 from opto.trace.errors import ExecutionError
-
+import numpy as np
 
 # List[Nodes], Node[List]
 def iterate(x: Any):
     """Return an iterator object for node of list, tuple, set, or dict."""
     if not isinstance(x, Node):
         x = node(x)
-    if issubclass(x.type, list) or issubclass(x.type, tuple) or issubclass(x.type, str):
+    if issubclass(x.type, list) or issubclass(x.type, tuple) or issubclass(x.type, str) or issubclass(x.type, np.ndarray):
         return SeqIterable(x)
     elif issubclass(x.type, set):
         converted_list = ops.to_list(x)

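The effect of the new `np.ndarray` branch, mirroring the unit test added in `tests/unit_tests/test_nodes.py` below: a node wrapping a numpy array now iterates like a node wrapping a list.

```python
import numpy as np
from opto.trace import node

x = node(np.array([1, 2, 3]))
for v in x:        # each element comes back as a node
    print(v.data)  # prints 1, then 2, then 3
```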
opto/utils/llm.py

Lines changed: 23 additions & 7 deletions
@@ -5,6 +5,7 @@
 import litellm
 import os
 import openai
+import warnings
 
 try:
     import autogen  # We import autogen here to avoid the need of installing autogen
@@ -153,15 +154,19 @@ class LiteLLM(AbstractModel):
 
     To use this, set the credentials through the environment variable as
     instructed in the LiteLLM documentation. For convenience, you can set the
-    default model name through the environment variable DEFAULT_LITELLM_MODEL.
+    default model name through the environment variable TRACE_LITELLM_MODEL.
     When using Azure models via token provider, you can set the Azure token
     provider scope through the environment variable AZURE_TOKEN_PROVIDER_SCOPE.
     """
 
     def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None,
                  cache=True) -> None:
         if model is None:
-            model = os.environ.get('DEFAULT_LITELLM_MODEL', 'gpt-4o')
+            model = os.environ.get('TRACE_LITELLM_MODEL')
+            if model is None:
+                warnings.warn("TRACE_LITELLM_MODEL environment variable is not found when loading the default model for LiteLLM. Attempt to load the default model from DEFAULT_LITELLM_MODEL environment variable. The usage of DEFAULT_LITELLM_MODEL will be deprecated. Please use the environment variable TRACE_LITELLM_MODEL for setting the default model name for LiteLLM.")
+                model = os.environ.get('DEFAULT_LITELLM_MODEL', 'gpt-4o')
+
         self.model_name = model
         self.cache = cache
         factory = lambda: self._factory(self.model_name)  # an LLM instance uses a fixed model
@@ -198,9 +203,9 @@ class CustomLLM(AbstractModel):
     def __init__(self, model: Union[str, None] = None, reset_freq: Union[int, None] = None,
                  cache=True) -> None:
         if model is None:
-            model = os.environ.get('DEFAULT_LITELLM_CUSTOM_MODEL', 'gpt-4o')
-            base_url = os.environ.get('DEFAULT_LITELLM_CUSTOM_URL', 'http://xx.xx.xxx.xx:4000')
-            server_api_key = os.environ.get('DEFAULT_LITELLM_CUSTOM_API',
+            model = os.environ.get('TRACE_CUSTOMLLM_MODEL', 'gpt-4o')
+            base_url = os.environ.get('TRACE_CUSTOMLLM_URL', 'http://xx.xx.xxx.xx:4000')
+            server_api_key = os.environ.get('TRACE_CUSTOMLLM_API_KEY',
                                             'sk-Xhg...')  # we assume the server has an API key
             # the server API is set through `master_key` in `config.yaml` for LiteLLM proxy server

@@ -224,5 +229,16 @@ def create(self, **config: Any):
         return self._model.chat.completions.create(**config)
 
 
-# Set Default LLM class
-LLM = LiteLLM # synonym
+
+TRACE_DEFAULT_LLM_BACKEND = os.getenv('TRACE_DEFAULT_LLM_BACKEND', 'LiteLLM')
+if TRACE_DEFAULT_LLM_BACKEND == 'AutoGen':
+    print("Using AutoGen as the default LLM backend.")
+    LLM = AutoGenLLM
+elif TRACE_DEFAULT_LLM_BACKEND == 'CustomLLM':
+    print("Using CustomLLM as the default LLM backend.")
+    LLM = CustomLLM
+elif TRACE_DEFAULT_LLM_BACKEND == 'LiteLLM':
+    print("Using LiteLLM as the default LLM backend.")
+    LLM = LiteLLM
+else:
+    raise ValueError(f"Unknown LLM backend: {TRACE_DEFAULT_LLM_BACKEND}")

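The `TRACE_LITELLM_MODEL` change above is a rename-with-fallback: read the new variable first, and warn before falling back to the deprecated one. A standalone sketch of the same pattern (the helper name `get_default_model` is illustrative, not part of Trace):

```python
import os
import warnings

def get_default_model(new_var="TRACE_LITELLM_MODEL",
                      old_var="DEFAULT_LITELLM_MODEL",
                      fallback="gpt-4o"):
    """Prefer the new variable; warn when only the deprecated one is set."""
    model = os.environ.get(new_var)
    if model is None:
        warnings.warn(f"{old_var} is deprecated; set {new_var} instead.")
        model = os.environ.get(old_var, fallback)
    return model
```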
tests/unit_tests/test_llm.py

Lines changed: 1 addition & 1 deletion
@@ -2,7 +2,7 @@
 from opto.optimizers.utils import print_color
 import os
 
-if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("DEFAULT_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
+if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
     llm = LLM()
     system_prompt = 'You are a helpful assistant.'
     user_prompt = "Hello world."

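The tests gate real LLM calls on available credentials. Under pytest, the same guard could also be expressed with `skipif`; a sketch, not part of this PR:

```python
import os
import pytest

HAVE_LLM_CREDENTIALS = bool(
    os.path.exists("OAI_CONFIG_LIST")
    or os.environ.get("TRACE_LITELLM_MODEL")
    or os.environ.get("OPENAI_API_KEY")
)

@pytest.mark.skipif(not HAVE_LLM_CREDENTIALS, reason="no LLM credentials configured")
def test_llm_smoke():
    from opto.utils.llm import LLM
    llm = LLM()  # the real test then sends a chat completion request
```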
tests/unit_tests/test_nodes.py

Lines changed: 9 additions & 2 deletions
@@ -2,7 +2,7 @@
 from opto.trace import node
 from opto.trace import operators as ops
 from opto.trace.utils import contain
-
+import numpy as np
 
 # Sum of str
 x = node("NodeX")
@@ -151,4 +151,11 @@ def fun(x):
 assert x.description == "[ParameterNode] x"
 
 x = node(1, trainable=True)
-assert x.description == "[ParameterNode] This is a ParameterNode in a computational graph."
+assert x.description == "[ParameterNode] This is a ParameterNode in a computational graph."
+
+
+# Test iterating numpy array
+x = node(np.array([1, 2, 3]))
+for i, v in enumerate(x):
+    assert isinstance(v, type(x))
+    assert v.data == x.data[i]

tests/unit_tests/test_optimizer.py

Lines changed: 2 additions & 2 deletions
@@ -34,7 +34,7 @@ def user(x):
     else:
         return "Success."
 
-if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("DEFAULT_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
+if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
     # One-step optimization example
     x = node(-1.0, trainable=True)
     optimizer = OptoPrime([x])
@@ -124,7 +124,7 @@ def foobar_text(x):
 GRAPH.clear()
 x = node("negative point one", trainable=True)
 
-if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("DEFAULT_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
+if os.path.exists("OAI_CONFIG_LIST") or os.environ.get("TRACE_LITELLM_MODEL") or os.environ.get("OPENAI_API_KEY"):
     optimizer = OptoPrime([x])
     output = foobar_text(x)
     feedback = user(output.data)

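For context, the one-step optimization pattern these tests exercise looks roughly like this. A sketch based on this diff and the project README: `node(..., trainable=True)` and `OptoPrime([x])` appear in the diff above, while the `zero_feedback`/`backward`/`step` calls are assumed from the README and may differ across versions; running it requires LLM credentials as in the guards above.

```python
from opto.trace import node, bundle
from opto.optimizers import OptoPrime

@bundle()
def foobar(x):
    """A toy traced program whose output we want to improve."""
    return x + 1

x = node(-1.0, trainable=True)        # trainable parameter
optimizer = OptoPrime([x])
output = foobar(x)
feedback = "Success." if output.data > 0 else "Output should be greater than zero."
optimizer.zero_feedback()
optimizer.backward(output, feedback)  # propagate textual feedback through the graph
optimizer.step()                      # the LLM proposes a new value for x
```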