pineconeRead.js
// pineconeRead.js
// Queries an existing Pinecone index ("warmseat") through a LangChain RetrievalQA chain:
// embed the question with OpenAI, retrieve matching chunks from Pinecone, answer with gpt-3.5-turbo.
import dotenv from 'dotenv';
import { Pinecone } from '@pinecone-database/pinecone';
import { RetrievalQAChain } from "langchain/chains";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { PineconeStore } from "langchain/vectorstores/pinecone";
import { ChatOpenAI } from "langchain/chat_models/openai";

dotenv.config();
const PINECONE_API_KEY = process.env.PINECONE_API_KEY;

// Connect to the Pinecone project and open the index that was populated earlier.
const pinecone = new Pinecone({
  apiKey: PINECONE_API_KEY,
  environment: 'gcp-starter',
});
const pineconeIndex = pinecone.Index("warmseat");

// Wrap the existing index as a LangChain vector store. OpenAIEmbeddings reads
// OPENAI_API_KEY from the environment and must match the embedding model used at ingest time.
const vectorStore = await PineconeStore.fromExistingIndex(
  new OpenAIEmbeddings(),
  { pineconeIndex }
);

// Retrieval-augmented QA: fetch relevant chunks via the retriever, then answer with the chat model.
const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo" });
const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever());

const response = await chain.call({
  query: "What's the summary of this talk?",
});
console.log(response);
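
// Usage sketch (assumptions, not verified against this repo's setup): package.json declares
// "type": "module" so top-level await works, the "warmseat" index has already been populated
// by a separate ingest step, and a local .env file provides
//   PINECONE_API_KEY=<your Pinecone key>
//   OPENAI_API_KEY=<your OpenAI key>
// Then run:
//   node pineconeRead.js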