@@ -1,10 +1,8 @@
 import openai
-import json
 import os
 import time
 import logging
-
-from filelock import FileLock
+import diskcache as dc
 
 
 log = logging.getLogger()
@@ -17,7 +15,7 @@ class OpenAIChat:
 
     def __init__(
         self,
-        openai_model: str = "gpt-4",
+        openai_model: str = "gpt-4o",
         cache_path: str = os.path.expanduser("~") + "/.cache",
     ):
         """
@@ -31,22 +29,20 @@ def __init__(
         openai.api_key = api_key
         self.openai_model = openai_model
 
-        self.cache_path = os.path.join(cache_path, "openai_chat_cache.json")
-        self.cache_lock = FileLock(self.cache_path + ".lock")
-        with self.cache_lock:
-            if not os.path.exists(self.cache_path):
-                if not os.path.exists(cache_path):
-                    os.makedirs(cache_path)
-                with open(self.cache_path, "w") as f:
-                    json.dump({}, f)
+        self.cache_path = os.path.join(cache_path, "openai_chat_cache.diskcache")
+        if not os.path.exists(cache_path):
+            os.makedirs(cache_path)
 
     def ask(self, message: str) -> str:
-        # check if the message is cached
-        with open(self.cache_path, "r") as f:
-            openai_responses = json.load(f)
+        cache_settings = dc.DEFAULT_SETTINGS.copy()
+        cache_settings["eviction_policy"] = "none"
+        cache_settings["size_limit"] = int(1e12)
+        cache_settings["cull_limit"] = 0
+        openai_responses = dc.Cache(self.cache_path, **cache_settings)
+
+        if (self.openai_model, message) in openai_responses:
+            reply = openai_responses[(self.openai_model, message)]
 
-        if message in openai_responses.get(self.openai_model, {}).keys():
-            reply = openai_responses[self.openai_model][message]
         else:
             # Ask openai
             if openai.api_key is None:
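In the new ask() path above, the cache is configured so nothing is ever dropped: an eviction_policy of "none" together with cull_limit = 0 disables culling entirely, and size_limit = int(1e12) leaves a nominal ~1 TB ceiling that is effectively inert while culling is off. A standalone sketch of the same pattern; the "example.diskcache" path and the key/value pair are hypothetical, not part of this commit:

# Sketch of the diskcache configuration used in ask(); path and data are hypothetical.
import diskcache as dc

settings = dc.DEFAULT_SETTINGS.copy()
settings["eviction_policy"] = "none"  # entries are never evicted
settings["cull_limit"] = 0            # never cull on writes
settings["size_limit"] = int(1e12)    # nominal ~1 TB ceiling, inert with culling off
cache = dc.Cache("example.diskcache", **settings)

key = ("gpt-4o", "What is 2 + 2?")    # (model, message) tuples work as cache keys
cache[key] = "4"
assert key in cache and cache[key] == "4"
cache.close()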
@@ -59,18 +55,10 @@ def ask(self, message: str) -> str:
                 {"role": "user", "content": message},
             ]
             chat = self._send_request(messages)
-
             reply = chat.choices[0].message.content
 
-            # add reply to cache
-            with self.cache_lock:
-                with open(self.cache_path, "r") as f:
-                    openai_responses = json.load(f)
-                if self.openai_model not in openai_responses.keys():
-                    openai_responses[self.openai_model] = {}
-                openai_responses[self.openai_model][message] = reply
-                with open(self.cache_path, "w") as f:
-                    json.dump(openai_responses, f)
+            openai_responses[(self.openai_model, message)] = reply
+            openai_responses.close()
 
         if "please provide" in reply.lower():
             return ""