Added graph interface, added neo4j + networkx structure and updates to the notebook
This commit is contained in:
parent
279c7a0789
commit
0dffd1a261
3 changed files with 1327 additions and 2509 deletions
3793
Demo_graph.ipynb
3793
Demo_graph.ipynb
File diff suppressed because it is too large
Load diff
|
|
@ -67,3 +67,18 @@ class VectorDBInterface(Protocol):
|
||||||
# collection_name: str,
|
# collection_name: str,
|
||||||
# data_point_id: str
|
# data_point_id: str
|
||||||
# ): raise NotImplementedError
|
# ): raise NotImplementedError
|
||||||
|
""" Search """
|
||||||
|
    @abstractmethod
    async def search(
        self,
        collection_name: str,
        query: object
    ):
        """Search a single collection for entries matching *query*.

        :param collection_name: name of the collection to search
        :param query: backend-specific query object — presumably an
            embedding vector or query DSL; confirm against implementations
        """
        raise NotImplementedError
|
||||||
|
|
||||||
|
    @abstractmethod
    async def batch_search(
        self,
        collection_name: str,
        query: object
    ):
        """Run several searches against one collection in a single call.

        :param collection_name: name of the collection to search
        :param query: backend-specific batch query object — presumably a
            sequence of queries; confirm against implementations
        """
        raise NotImplementedError
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -4,6 +4,7 @@ import random
|
||||||
import os
|
import os
|
||||||
import time
|
import time
|
||||||
import openai
|
import openai
|
||||||
|
from typing import List
|
||||||
|
|
||||||
HOST = os.getenv("OPENAI_API_BASE")
|
HOST = os.getenv("OPENAI_API_BASE")
|
||||||
HOST_TYPE = os.getenv("BACKEND_TYPE") # default None == ChatCompletion
|
HOST_TYPE = os.getenv("BACKEND_TYPE") # default None == ChatCompletion
|
||||||
|
|
@ -22,6 +23,7 @@ def retry_with_exponential_backoff(
|
||||||
"""Retry a function with exponential backoff."""
|
"""Retry a function with exponential backoff."""
|
||||||
|
|
||||||
def wrapper(*args, **kwargs):
|
def wrapper(*args, **kwargs):
|
||||||
|
"""Wrapper for sync functions."""
|
||||||
# Initialize variables
|
# Initialize variables
|
||||||
num_retries = 0
|
num_retries = 0
|
||||||
delay = initial_delay
|
delay = initial_delay
|
||||||
|
|
@ -57,6 +59,7 @@ def retry_with_exponential_backoff(
|
||||||
|
|
||||||
@retry_with_exponential_backoff
def completions_with_backoff(**kwargs):
    """Synchronous wrapper around ``openai.chat.completions.create``.

    Retries on rate-limit errors with exponential backoff (see
    ``retry_with_exponential_backoff``); ``kwargs`` are forwarded
    unchanged to the OpenAI client.
    """
    # Local model
    return openai.chat.completions.create(**kwargs)
|
||||||
|
|
||||||
|
|
@ -72,6 +75,9 @@ def aretry_with_exponential_backoff(
|
||||||
"""Retry a function with exponential backoff."""
|
"""Retry a function with exponential backoff."""
|
||||||
|
|
||||||
async def wrapper(*args, **kwargs):
|
async def wrapper(*args, **kwargs):
|
||||||
|
"""Wrapper for async functions.
|
||||||
|
:param args: list
|
||||||
|
:param kwargs: dict"""
|
||||||
# Initialize variables
|
# Initialize variables
|
||||||
num_retries = 0
|
num_retries = 0
|
||||||
delay = initial_delay
|
delay = initial_delay
|
||||||
|
|
@ -108,6 +114,7 @@ def aretry_with_exponential_backoff(
|
||||||
|
|
||||||
@aretry_with_exponential_backoff
async def acompletions_with_backoff(**kwargs):
    """Async wrapper around the chat-completions endpoint w/ backoff.

    Retries on rate-limit errors with exponential backoff (see
    ``aretry_with_exponential_backoff``); ``kwargs`` are forwarded
    unchanged to the OpenAI client.

    NOTE(review): openai>=1.0 removed the module-level ``acreate`` —
    the original ``openai.chat.completions.acreate(**kwargs)`` raises
    AttributeError. Async calls must go through an ``AsyncOpenAI``
    client. The client reads ``OPENAI_API_KEY``/``OPENAI_BASE_URL`` from
    the environment; confirm this matches the ``OPENAI_API_BASE`` config
    used elsewhere in this module.
    """
    return await openai.AsyncOpenAI().chat.completions.create(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -134,11 +141,30 @@ async def async_get_embedding_with_backoff(text, model="text-embedding-ada-002")
|
||||||
|
|
||||||
@retry_with_exponential_backoff
def create_embedding_with_backoff(**kwargs):
    """Synchronous wrapper around ``openai.embeddings.create``.

    Retries on rate-limit errors with exponential backoff (see
    ``retry_with_exponential_backoff``); ``kwargs`` are forwarded
    unchanged to the OpenAI client.
    """
    return openai.embeddings.create(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
def get_embedding_with_backoff(text: str, model: str = "text-embedding-ada-002"):
    """Return the embedding vector for *text*.

    Newlines are flattened to spaces before the request, and rate
    limits are handled by ``create_embedding_with_backoff``.

    :param text: str
    :param model: str
    """
    cleaned = text.replace("\n", " ")
    response = create_embedding_with_backoff(input=[cleaned], model=model)
    return response.data[0].embedding
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
async def async_get_multiple_embeddings_with_backoff(texts: List[str], models: List[str]):
    """Fetch embeddings for several texts concurrently, with backoff.

    Texts are paired positionally with model names; extra entries in
    the longer list are ignored (``zip`` semantics).

    :param texts: list of input strings
    :param models: list of model names, one per text
    """
    # One coroutine per (text, model) pair, run in parallel.
    tasks = [
        async_get_embedding_with_backoff(text, model)
        for text, model in zip(texts, models)
    ]
    return await asyncio.gather(*tasks)
|
||||||
Loading…
Add table
Reference in a new issue