4 files changed: +59 -11 lines changed
Original file line number  Diff line number  Diff line change
123 123       "outputs": [],
124 124       "source": [
125 125        "if not os.path.exists('data/Apple.pdf'):\n",
126 -          "    print(f\"Fetching data/fin_train.jsonl\")\n",
127 -          "    response = requests.get(\"https://raw.githubusercontent.com/ContextualAI/examples/refs/heads/main/01-getting-started/data/fin_train.jsonl\")\n",
128 -          "    with open('data/fin_train.jsonl', 'wb') as f:\n",
126 +          "    print(f\"Fetching data/Apple.pdf\")\n",
127 +          "    response = requests.get(\"https://raw.githubusercontent.com/ContextualAI/examples/refs/heads/main/01-getting-started/data/Apple.pdf\")\n",
128 +          "    with open('data/Apple.pdf', 'wb') as f:\n",
129 129        "        f.write(response.content)"
130 130       ]
131 131      },
135 135      "metadata": {},
136 136      "outputs": [],
137 137      "source": [
138 -         "with open('Apple.pdf', 'rb') as f:\n",
138 +         "with open('data/Apple.pdf', 'rb') as f:\n",
139 139       "    ingestion_result = client.datastores.documents.ingest(datastore_id, file=f)\n",
140 140       "    document_id = ingestion_result.id\n",
141 141       "    print(f\"Successfully uploaded to datastore {datastore_id}\")"
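The same fetch-if-missing cell is repeated in each notebook this diff touches. A small helper like the sketch below could factor the pattern out; `RAW_BASE` and `fetch_if_missing` are illustrative names introduced here, not part of the PR.

```python
# Sketch of the download pattern the changed cells repeat; not part of the PR itself.
import os
import requests

# Base URL of the examples repo (taken from the URLs in the diff above).
RAW_BASE = "https://raw.githubusercontent.com/ContextualAI/examples/refs/heads/main"

def fetch_if_missing(relative_path: str, notebook_dir: str) -> None:
    """Download a data file from the examples repo only if it is not already on disk."""
    if not os.path.exists(relative_path):
        print(f"Fetching {relative_path}")
        response = requests.get(f"{RAW_BASE}/{notebook_dir}/{relative_path}", timeout=30)
        response.raise_for_status()  # fail loudly instead of silently writing an HTML error page
        os.makedirs(os.path.dirname(relative_path), exist_ok=True)  # the cells assume data/ exists
        with open(relative_path, "wb") as f:
            f.write(response.content)

fetch_if_missing("data/Apple.pdf", "01-getting-started")
```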
Original file line number  Diff line number  Diff line change
74 74        "outputs": [],
75 75        "source": [
76 76         "import os\n",
77 +           "import requests\n",
77 78         "from contextual import ContextualAI\n",
78 79         "from IPython.display import display, Markdown"
79 80        ]
167 168       "I am using an example PDF. You can also use your own documents here. If you have very long documents (hundreds of pages), processing can take longer."
168 169      ]
169 170     },
171 +       {
172 +        "cell_type": "code",
173 +        "execution_count": null,
174 +        "metadata": {},
175 +        "outputs": [],
176 +        "source": [
177 +         "if not os.path.exists('data/Apple.pdf'):\n",
178 +         "    print(f\"Fetching data/Apple.pdf\")\n",
179 +         "    response = requests.get(\"https://raw.githubusercontent.com/ContextualAI/examples/refs/heads/main/02-hands-on-lab/data/Apple.pdf\")\n",
180 +         "    with open('data/Apple.pdf', 'wb') as f:\n",
181 +         "        f.write(response.content)"
182 +        ]
183 +       },
170 184     {
171 185      "cell_type": "code",
172 186      "execution_count": 6,
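Because this notebook ingests the downloaded file right away, a guard like the sketch below can catch a failed download (for example an HTML error page saved as Apple.pdf); PDF files always begin with the bytes `%PDF`. This is illustrative only and not part of the changed notebook.

```python
# Illustrative guard, not part of the PR: verify the download is a real PDF
# before handing it to the ingestion call (PDF files start with b"%PDF").
with open("data/Apple.pdf", "rb") as f:
    header = f.read(4)

if header != b"%PDF":
    raise RuntimeError("data/Apple.pdf does not look like a PDF; re-run the fetch cell above")
print("data/Apple.pdf looks like a valid PDF")
```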
Original file line number  Diff line number  Diff line change
36 36         "import os\n",
37 37         "import json\n",
38 38         "import ast\n",
39 +           "import requests\n",
39 40         "import pandas as pd\n",
40 41         "from contextual import ContextualAI"
41 42        ]
158 159       "Let's start with an evaluation dataset:"
159 160      ]
160 161     },
162 +       {
163 +        "cell_type": "code",
164 +        "execution_count": null,
165 +        "metadata": {},
166 +        "outputs": [],
167 +        "source": [
168 +         "if not os.path.exists('data/eval_short.csv'):\n",
169 +         "    print(f\"Fetching data/eval_short.csv\")\n",
170 +         "    response = requests.get(\"https://raw.githubusercontent.com/ContextualAI/examples/refs/heads/main/01-hands-on-lab/data/eval_short.csv\")\n",
171 +         "    with open('data/eval_short.csv', 'wb') as f:\n",
172 +         "        f.write(response.content)"
173 +        ]
174 +       },
161 175     {
162 176      "cell_type": "code",
163 177      "execution_count": 23,
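Once the fetch cell has run, a quick peek like the sketch below confirms the CSV landed intact before it is used for evaluation. It prints whatever columns the file actually contains rather than assuming a schema.

```python
# Quick sanity check on the downloaded evaluation set (sketch; no schema assumed).
import pandas as pd

eval_df = pd.read_csv("data/eval_short.csv")
print(f"{len(eval_df)} rows, columns: {eval_df.columns.tolist()}")
eval_df.head()
```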
715 729       "- prompt field should be a question or statement that the model should respond to."
716 730      ]
717 731     },
732 +       {
733 +        "cell_type": "code",
734 +        "execution_count": null,
735 +        "metadata": {},
736 +        "outputs": [],
737 +        "source": [
738 +         "if not os.path.exists('data/fin_train.jsonl'):\n",
739 +         "    print(f\"Fetching data/fin_train.jsonl\")\n",
740 +         "    response = requests.get(\"https://raw.githubusercontent.com/ContextualAI/examples/refs/heads/main/01-hands-on-lab/data/fin_train.jsonl\")\n",
741 +         "    with open('data/fin_train.jsonl', 'wb') as f:\n",
742 +         "        f.write(response.content)"
743 +        ]
744 +       },
718 745     {
719 746      "cell_type": "code",
720 747      "execution_count": null,
730 757       "!head data/fin_train.jsonl"
731 758      ]
732 759     },
733 -       {
734 -        "cell_type": "code",
735 -        "execution_count": null,
736 -        "metadata": {},
737 -        "outputs": [],
738 -        "source": []
739 -       },
740 760     {
741 761      "cell_type": "markdown",
742 762      "metadata": {
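The markdown above says each training record's prompt field should be a question or statement the model responds to. A short check like the sketch below verifies that every line of the fetched file parses as JSON and carries that field; it assumes only that prompt is required and nothing else about the schema.

```python
# Sketch: confirm data/fin_train.jsonl is valid JSONL and every record has the
# 'prompt' field described in the markdown cell above (no other schema assumed).
import json

with open("data/fin_train.jsonl") as f:
    for line_no, line in enumerate(f, start=1):
        record = json.loads(line)
        if "prompt" not in record:
            raise ValueError(f"line {line_no} is missing a 'prompt' field")
print("fin_train.jsonl parsed cleanly")
```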
Original file line number  Diff line number  Diff line change
18 18        "outputs": [],
19 19        "source": [
20 20         "import os\n",
21 +           "import requests\n",
21 22         "from contextual import ContextualAI\n",
22 23         "from IPython.display import display, Markdown"
23 24        ]
192 193       "Now that you have updated the agent, go try running another evaluation job. You will see the performance has improved."
193 194      ]
194 195     },
196 +       {
197 +        "cell_type": "code",
198 +        "execution_count": null,
199 +        "metadata": {},
200 +        "outputs": [],
201 +        "source": [
202 +         "if not os.path.exists('data/eval_short.csv'):\n",
203 +         "    print(f\"Fetching data/eval_short.csv\")\n",
204 +         "    response = requests.get(\"https://raw.githubusercontent.com/ContextualAI/examples/refs/heads/main/02-hands-on-lab/data/eval_short.csv\")\n",
205 +         "    with open('data/eval_short.csv', 'wb') as f:\n",
206 +         "        f.write(response.content)"
207 +        ]
208 +       },
195 209     {
196 210      "cell_type": "code",
197 211      "execution_count": 28,