diff --git a/README.md b/README.md index 8c8221bc7..a252acb7e 100644 --- a/README.md +++ b/README.md @@ -35,27 +35,8 @@ Accoroding to enviornment we are configuring the models which is indicated by VI EX: ```env VITE_LLM_MODELS_PROD="openai_gpt_4o,openai_gpt_4o_mini,diffbot,gemini_1.5_flash" -``` -According to the environment, we are configuring the models which indicated by VITE_LLM_MODELS_PROD variable we can configure models based on our needs. -EX: -```env -VITE_LLM_MODELS_PROD="openai_gpt_4o,openai_gpt_4o_mini,diffbot,gemini_1.5_flash" -``` -if you only want OpenAI: -```env -VITE_LLM_MODELS_PROD="diffbot,openai-gpt-3.5,openai-gpt-4o" -VITE_LLM_MODELS_PROD="diffbot,openai-gpt-3.5,openai-gpt-4o" -OPENAI_API_KEY="your-openai-key" ``` - -if you only want Diffbot: -```env -VITE_LLM_MODELS_PROD="diffbot" -VITE_LLM_MODELS_PROD="diffbot" -DIFFBOT_API_KEY="your-diffbot-key" -``` - You can then run Docker Compose to build and start all components: ```bash docker-compose up --build @@ -88,7 +69,6 @@ VITE_CHAT_MODES="" If however you want to specify the only vector mode or only graph mode you can do that by specifying the mode in the env: ```env VITE_CHAT_MODES="vector,graph" -VITE_CHAT_MODES="vector,graph" ``` #### Running Backend and Frontend separately (dev environment) @@ -105,7 +85,7 @@ Alternatively, you can run the backend and frontend separately: ``` - For the backend: -1. Create the backend/.env file by copy/pasting the backend/example.env. To streamline the initial setup and testing of the application, you can preconfigure user credentials directly within the .env file. This bypasses the login dialog and allows you to immediately connect with a predefined user. +1. Create the backend/.env file by copy/pasting the backend/example.env. To streamline the initial setup and testing of the application, you can preconfigure user credentials directly within the backend .env file. This bypasses the login dialog and allows you to immediately connect with a predefined user. - **NEO4J_URI**: - **NEO4J_USERNAME**: - **NEO4J_PASSWORD**: @@ -139,6 +119,8 @@ Allow unauthenticated request : Yes ## ENV | Env Variable Name | Mandatory/Optional | Default Value | Description | |-------------------------|--------------------|---------------|--------------------------------------------------------------------------------------------------| +| | +| **BACKEND ENV** | EMBEDDING_MODEL | Optional | all-MiniLM-L6-v2 | Model for generating the text embedding (all-MiniLM-L6-v2 , openai , vertexai) | | IS_EMBEDDING | Optional | true | Flag to enable text embedding | | KNN_MIN_SCORE | Optional | 0.94 | Minimum score for KNN algorithm | @@ -152,7 +134,13 @@ Allow unauthenticated request : Yes | LANGCHAIN_API_KEY | Optional | | API key for Langchain | | LANGCHAIN_PROJECT | Optional | | Project for Langchain | | LANGCHAIN_TRACING_V2 | Optional | true | Flag to enable Langchain tracing | +| GCS_FILE_CACHE | Optional | False | If set to True, will save the files to process into GCS. 
If set to False, will save the files locally | | LANGCHAIN_ENDPOINT | Optional | https://api.smith.langchain.com | Endpoint for Langchain API | +| ENTITY_EMBEDDING | Optional | False | If set to True, It will add embeddings for each entity in database | +| LLM_MODEL_CONFIG_ollama_ | Optional | | Set ollama config as - model_name,model_local_url for local deployments | +| RAGAS_EMBEDDING_MODEL | Optional | openai | embedding model used by ragas evaluation framework | +| | +| **FRONTEND ENV** | VITE_BACKEND_API_URL | Optional | http://localhost:8000 | URL for backend API | | VITE_BLOOM_URL | Optional | https://workspace-preview.neo4j.io/workspace/explore?connectURL={CONNECT_URL}&search=Show+me+a+graph&featureGenAISuggestions=true&featureGenAISuggestionsInternal=true | URL for Bloom visualization | | VITE_REACT_APP_SOURCES | Mandatory | local,youtube,wiki,s3 | List of input sources that will be available | @@ -163,10 +151,6 @@ Allow unauthenticated request : Yes | VITE_GOOGLE_CLIENT_ID | Optional | | Client ID for Google authentication | | VITE_LLM_MODELS_PROD | Optional | openai_gpt_4o,openai_gpt_4o_mini,diffbot,gemini_1.5_flash | To Distinguish models based on the Enviornment PROD or DEV | VITE_LLM_MODELS | Optional | 'diffbot,openai_gpt_3.5,openai_gpt_4o,openai_gpt_4o_mini,gemini_1.5_pro,gemini_1.5_flash,azure_ai_gpt_35,azure_ai_gpt_4o,ollama_llama3,groq_llama3_70b,anthropic_claude_3_5_sonnet' | Supported Models For the application -| GCS_FILE_CACHE | Optional | False | If set to True, will save the files to process into GCS. If set to False, will save the files locally | -| ENTITY_EMBEDDING | Optional | False | If set to True, It will add embeddings for each entity in database | -| LLM_MODEL_CONFIG_ollama_ | Optional | | Set ollama config as - model_name,model_local_url for local deployments | -| RAGAS_EMBEDDING_MODEL | Optional | openai | embedding model used by ragas evaluation framework | ## LLMs Supported 1. 
OpenAI diff --git a/backend/example.env b/backend/example.env index 2120b466b..f747a94e8 100644 --- a/backend/example.env +++ b/backend/example.env @@ -43,4 +43,5 @@ LLM_MODEL_CONFIG_bedrock_claude_3_5_sonnet="model_name,aws_access_key_id,aws_sec LLM_MODEL_CONFIG_ollama_llama3="model_name,model_local_url" YOUTUBE_TRANSCRIPT_PROXY="https://user:pass@domain:port" EFFECTIVE_SEARCH_RATIO=5 - +GRAPH_CLEANUP_MODEL="openai_gpt_4o" +CHUNKS_TO_BE_PROCESSED="50" diff --git a/backend/score.py b/backend/score.py index 5e00a601d..6869b1b85 100644 --- a/backend/score.py +++ b/backend/score.py @@ -13,7 +13,7 @@ from src.graphDB_dataAccess import graphDBdataAccess from src.graph_query import get_graph_results,get_chunktext_results from src.chunkid_entities import get_entities_from_chunkids -from src.post_processing import create_vector_fulltext_indexes, create_entity_embedding +from src.post_processing import create_vector_fulltext_indexes, create_entity_embedding, graph_schema_consolidation from sse_starlette.sse import EventSourceResponse from src.communities import create_communities from src.neighbours import get_neighbour_nodes @@ -187,7 +187,8 @@ async def extract_knowledge_graph_from_file( allowedRelationship=Form(None), language=Form(None), access_token=Form(None), - retry_condition=Form(None) + retry_condition=Form(None), + additional_instructions=Form(None) ): """ Calls 'extract_graph_from_file' in a new thread to create Neo4jGraph from a @@ -210,22 +211,22 @@ async def extract_knowledge_graph_from_file( if source_type == 'local file': merged_file_path = os.path.join(MERGED_DIR,file_name) logging.info(f'File path:{merged_file_path}') - uri_latency, result = await extract_graph_from_file_local_file(uri, userName, password, database, model, merged_file_path, file_name, allowedNodes, allowedRelationship, retry_condition) + uri_latency, result = await extract_graph_from_file_local_file(uri, userName, password, database, model, merged_file_path, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions) elif source_type == 's3 bucket' and source_url: - uri_latency, result = await extract_graph_from_file_s3(uri, userName, password, database, model, source_url, aws_access_key_id, aws_secret_access_key, file_name, allowedNodes, allowedRelationship, retry_condition) + uri_latency, result = await extract_graph_from_file_s3(uri, userName, password, database, model, source_url, aws_access_key_id, aws_secret_access_key, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions) elif source_type == 'web-url': - uri_latency, result = await extract_graph_from_web_page(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, retry_condition) + uri_latency, result = await extract_graph_from_web_page(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions) elif source_type == 'youtube' and source_url: - uri_latency, result = await extract_graph_from_file_youtube(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, retry_condition) + uri_latency, result = await extract_graph_from_file_youtube(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions) elif source_type == 'Wikipedia' and wiki_query: - uri_latency, result = await extract_graph_from_file_Wikipedia(uri, userName, password, database, model, 
wiki_query, language, file_name, allowedNodes, allowedRelationship, retry_condition) + uri_latency, result = await extract_graph_from_file_Wikipedia(uri, userName, password, database, model, wiki_query, language, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions) elif source_type == 'gcs bucket' and gcs_bucket_name: - uri_latency, result = await extract_graph_from_file_gcs(uri, userName, password, database, model, gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token, file_name, allowedNodes, allowedRelationship, retry_condition) + uri_latency, result = await extract_graph_from_file_gcs(uri, userName, password, database, model, gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions) else: return create_api_response('Failed',message='source_type is other than accepted source') extract_api_time = time.time() - start_time @@ -334,10 +335,15 @@ async def post_processing(uri=Form(), userName=Form(), password=Form(), database await asyncio.to_thread(create_entity_embedding, graph) api_name = 'post_processing/create_entity_embedding' logging.info(f'Entity Embeddings created') + + if "graph_schema_consolidation" in tasks : + await asyncio.to_thread(graph_schema_consolidation, graph) + api_name = 'post_processing/graph_schema_consolidation' + logging.info(f'Updated nodes and relationship labels') if "enable_communities" in tasks: api_name = 'create_communities' - await asyncio.to_thread(create_communities, uri, userName, password, database) + await asyncio.to_thread(create_communities, uri, userName, password, database) logging.info(f'created communities') graph = create_graph_database_connection(uri, userName, password, database) @@ -347,9 +353,11 @@ async def post_processing(uri=Form(), userName=Form(), password=Form(), database if count_response: count_response = [{"filename": filename, **counts} for filename, counts in count_response.items()] logging.info(f'Updated source node with community related counts') + + end = time.time() elapsed_time = end - start - json_obj = {'api_name': api_name, 'db_url': uri, 'userName':userName, 'database':database, 'tasks':tasks, 'logging_time': formatted_time(datetime.now(timezone.utc)), 'elapsed_api_time':f'{elapsed_time:.2f}'} + json_obj = {'api_name': api_name, 'db_url': uri, 'userName':userName, 'database':database, 'logging_time': formatted_time(datetime.now(timezone.utc)), 'elapsed_api_time':f'{elapsed_time:.2f}'} logger.log_struct(json_obj) return create_api_response('Success', data=count_response, message='All tasks completed successfully') diff --git a/backend/src/llm.py b/backend/src/llm.py index 381a38a68..ae768da5b 100644 --- a/backend/src/llm.py +++ b/backend/src/llm.py @@ -13,6 +13,7 @@ from langchain_community.chat_models import ChatOllama import boto3 import google.auth +from src.shared.constants import ADDITIONAL_INSTRUCTIONS def get_llm(model: str): """Retrieve the specified language model based on the model name.""" @@ -160,14 +161,14 @@ def get_chunk_id_as_doc_metadata(chunkId_chunkDoc_list): async def get_graph_document_list( - llm, combined_chunk_document_list, allowedNodes, allowedRelationship + llm, combined_chunk_document_list, allowedNodes, allowedRelationship, additional_instructions=None ): futures = [] graph_document_list = [] if "diffbot_api_key" in dir(llm): llm_transformer = llm else: - if "get_name" in dir(llm) and llm.get_name() != "ChatOenAI" or 
llm.get_name() != "ChatVertexAI" or llm.get_name() != "AzureChatOpenAI": + if "get_name" in dir(llm) and llm.get_name() != "ChatOpenAI" or llm.get_name() != "ChatVertexAI" or llm.get_name() != "AzureChatOpenAI": node_properties = False relationship_properties = False else: @@ -180,6 +181,7 @@ async def get_graph_document_list( allowed_nodes=allowedNodes, allowed_relationships=allowedRelationship, ignore_tool_usage=True, + additional_instructions=ADDITIONAL_INSTRUCTIONS+ (additional_instructions if additional_instructions else "") ) if isinstance(llm,DiffbotGraphTransformer): @@ -189,7 +191,7 @@ async def get_graph_document_list( return graph_document_list -async def get_graph_from_llm(model, chunkId_chunkDoc_list, allowedNodes, allowedRelationship): +async def get_graph_from_llm(model, chunkId_chunkDoc_list, allowedNodes, allowedRelationship, additional_instructions=None): try: llm, model_name = get_llm(model) combined_chunk_document_list = get_combined_chunks(chunkId_chunkDoc_list) @@ -204,7 +206,7 @@ async def get_graph_from_llm(model, chunkId_chunkDoc_list, allowedNodes, allowed allowedRelationship = allowedRelationship.split(',') graph_document_list = await get_graph_document_list( - llm, combined_chunk_document_list, allowedNodes, allowedRelationship + llm, combined_chunk_document_list, allowedNodes, allowedRelationship, additional_instructions ) return graph_document_list except Exception as e: diff --git a/backend/src/main.py b/backend/src/main.py index 80098fa80..852740365 100644 --- a/backend/src/main.py +++ b/backend/src/main.py @@ -221,7 +221,7 @@ def create_source_node_graph_url_wikipedia(graph, model, wiki_query, source_type lst_file_name.append({'fileName':obj_source_node.file_name,'fileSize':obj_source_node.file_size,'url':obj_source_node.url, 'language':obj_source_node.language, 'status':'Success'}) return lst_file_name,success_count,failed_count -async def extract_graph_from_file_local_file(uri, userName, password, database, model, merged_file_path, fileName, allowedNodes, allowedRelationship, retry_condition): +async def extract_graph_from_file_local_file(uri, userName, password, database, model, merged_file_path, fileName, allowedNodes, allowedRelationship, retry_condition, additional_instructions): logging.info(f'Process file name :{fileName}') if not retry_condition: @@ -235,9 +235,9 @@ async def extract_graph_from_file_local_file(uri, userName, password, database, raise LLMGraphBuilderException(f'File content is not available for file : {file_name}') return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, True, merged_file_path) else: - return await processing_source(uri, userName, password, database, model, fileName, [], allowedNodes, allowedRelationship, True, merged_file_path, retry_condition) + return await processing_source(uri, userName, password, database, model, fileName, [], allowedNodes, allowedRelationship, True, merged_file_path, retry_condition, additional_instructions=additional_instructions) -async def extract_graph_from_file_s3(uri, userName, password, database, model, source_url, aws_access_key_id, aws_secret_access_key, file_name, allowedNodes, allowedRelationship, retry_condition): +async def extract_graph_from_file_s3(uri, userName, password, database, model, source_url, aws_access_key_id, aws_secret_access_key, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions): if not retry_condition: if(aws_access_key_id==None or 
aws_secret_access_key==None): raise LLMGraphBuilderException('Please provide AWS access and secret keys') @@ -249,18 +249,18 @@ async def extract_graph_from_file_s3(uri, userName, password, database, model, s raise LLMGraphBuilderException(f'File content is not available for file : {file_name}') return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship) else: - return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, retry_condition=retry_condition) + return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, retry_condition=retry_condition, additional_instructions=additional_instructions) -async def extract_graph_from_web_page(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, retry_condition): +async def extract_graph_from_web_page(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions): if not retry_condition: file_name, pages = get_documents_from_web_page(source_url) if pages==None or len(pages)==0: raise LLMGraphBuilderException(f'Content is not available for given URL : {file_name}') return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship) else: - return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, retry_condition=retry_condition) + return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, retry_condition=retry_condition, additional_instructions=additional_instructions) -async def extract_graph_from_file_youtube(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, retry_condition): +async def extract_graph_from_file_youtube(uri, userName, password, database, model, source_url, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions): if not retry_condition: file_name, pages = get_documents_from_youtube(source_url) @@ -268,27 +268,27 @@ async def extract_graph_from_file_youtube(uri, userName, password, database, mod raise LLMGraphBuilderException(f'Youtube transcript is not available for file : {file_name}') return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship) else: - return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, retry_condition=retry_condition) + return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, retry_condition=retry_condition, additional_instructions=additional_instructions) -async def extract_graph_from_file_Wikipedia(uri, userName, password, database, model, wiki_query, language, file_name, allowedNodes, allowedRelationship, retry_condition): +async def extract_graph_from_file_Wikipedia(uri, userName, password, database, model, wiki_query, language, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions): if not retry_condition: file_name, pages = get_documents_from_Wikipedia(wiki_query, language) if pages==None or len(pages)==0: raise LLMGraphBuilderException(f'Wikipedia page is not available for file : {file_name}') return 
await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship) else: - return await processing_source(uri, userName, password, database, model, file_name,[], allowedNodes, allowedRelationship, retry_condition=retry_condition) + return await processing_source(uri, userName, password, database, model, file_name,[], allowedNodes, allowedRelationship, retry_condition=retry_condition, additional_instructions=additional_instructions) -async def extract_graph_from_file_gcs(uri, userName, password, database, model, gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token, file_name, allowedNodes, allowedRelationship, retry_condition): +async def extract_graph_from_file_gcs(uri, userName, password, database, model, gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token, file_name, allowedNodes, allowedRelationship, retry_condition, additional_instructions): if not retry_condition: file_name, pages = get_documents_from_gcs(gcs_project_id, gcs_bucket_name, gcs_bucket_folder, gcs_blob_filename, access_token) if pages==None or len(pages)==0: raise LLMGraphBuilderException(f'File content is not available for file : {file_name}') return await processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship) else: - return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, retry_condition=retry_condition) + return await processing_source(uri, userName, password, database, model, file_name, [], allowedNodes, allowedRelationship, retry_condition=retry_condition, additional_instructions=additional_instructions) -async def processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, is_uploaded_from_local=None, merged_file_path=None, retry_condition=None): +async def processing_source(uri, userName, password, database, model, file_name, pages, allowedNodes, allowedRelationship, is_uploaded_from_local=None, merged_file_path=None, retry_condition=None, additional_instructions=None): """ Extracts a Neo4jGraph from a PDF file based on the model. 
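For context, here is a minimal client-side sketch (not part of this patch) of how the new `additional_instructions` form field can be passed to the `/extract` endpoint. The base URL, credentials, and file name are placeholders, and the file is assumed to have already been uploaded through `/upload`:

```python
# Minimal sketch: trigger extraction for an already-uploaded local file,
# passing free-form guidance through the new 'additional_instructions' field.
# Placeholders: base URL, Neo4j credentials, and the file name.
import requests

payload = {
    "uri": "neo4j+s://<your-instance>.databases.neo4j.io",
    "userName": "neo4j",
    "password": "<password>",
    "database": "neo4j",
    "model": "openai_gpt_4o",
    "source_type": "local file",
    "file_name": "example.pdf",
    # Appended to the ADDITIONAL_INSTRUCTIONS constant before entity extraction (see src/llm.py).
    "additional_instructions": "Treat dates, revenues and other numeric values as node properties, not as separate entities.",
}

response = requests.post("http://localhost:8000/extract", data=payload)
print(response.json())
```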
@@ -361,11 +361,14 @@ async def processing_source(uri, userName, password, database, model, file_name, logging.info('Update the status as Processing') update_graph_chunk_processed = int(os.environ.get('UPDATE_GRAPH_CHUNKS_PROCESSED')) + chunk_to_be_processed = int(os.environ.get('CHUNKS_TO_BE_PROCESSED', '50')) # selected_chunks = [] is_cancelled_status = False job_status = "Completed" for i in range(0, len(chunkId_chunkDoc_list), update_graph_chunk_processed): select_chunks_upto = i+update_graph_chunk_processed + if select_chunks_upto > chunk_to_be_processed: + break logging.info(f'Selected Chunks upto: {select_chunks_upto}') if len(chunkId_chunkDoc_list) <= select_chunks_upto: select_chunks_upto = len(chunkId_chunkDoc_list) @@ -380,7 +383,7 @@ async def processing_source(uri, userName, password, database, model, file_name, break else: processing_chunks_start_time = time.time() - node_count,rel_count,latency_processed_chunk = await processing_chunks(selected_chunks,graph,uri, userName, password, database,file_name,model,allowedNodes,allowedRelationship,node_count, rel_count) + node_count,rel_count,latency_processed_chunk = await processing_chunks(selected_chunks,graph,uri, userName, password, database,file_name,model,allowedNodes,allowedRelationship,node_count, rel_count, additional_instructions) processing_chunks_end_time = time.time() processing_chunks_elapsed_end_time = processing_chunks_end_time - processing_chunks_start_time logging.info(f"Time taken {update_graph_chunk_processed} chunks processed upto {select_chunks_upto} completed in {processing_chunks_elapsed_end_time:.2f} seconds for file name {file_name}") @@ -457,7 +460,7 @@ async def processing_source(uri, userName, password, database, model, file_name, logging.error(error_message) raise LLMGraphBuilderException(error_message) -async def processing_chunks(chunkId_chunkDoc_list,graph,uri, userName, password, database,file_name,model,allowedNodes,allowedRelationship, node_count, rel_count): +async def processing_chunks(chunkId_chunkDoc_list,graph,uri, userName, password, database,file_name,model,allowedNodes,allowedRelationship, node_count, rel_count, additional_instructions=None): #create vector index and update chunk node with embedding latency_processing_chunk = {} if graph is not None: @@ -475,7 +478,7 @@ async def processing_chunks(chunkId_chunkDoc_list,graph,uri, userName, password, logging.info("Get graph document list from models") start_entity_extraction = time.time() - graph_documents = await get_graph_from_llm(model, chunkId_chunkDoc_list, allowedNodes, allowedRelationship) + graph_documents = await get_graph_from_llm(model, chunkId_chunkDoc_list, allowedNodes, allowedRelationship, additional_instructions) end_entity_extraction = time.time() elapsed_entity_extraction = end_entity_extraction - start_entity_extraction logging.info(f'Time taken to extract enitities from LLM Graph Builder: {elapsed_entity_extraction:.2f} seconds') @@ -677,7 +680,7 @@ def get_labels_and_relationtypes(graph): return label order by label limit 100 } as labels, collect { CALL db.relationshipTypes() yield relationshipType as type - WHERE NOT type IN ['PART_OF', 'NEXT_CHUNK', 'HAS_ENTITY', '_Bloom_Perspective_','FIRST_CHUNK'] + WHERE NOT type IN ['PART_OF', 'NEXT_CHUNK', 'HAS_ENTITY', '_Bloom_Perspective_','FIRST_CHUNK','SIMILAR','IN_COMMUNITY','PARENT_COMMUNITY'] return type order by type LIMIT 100 } as relationshipTypes """ graphDb_data_Access = graphDBdataAccess(graph) diff --git a/backend/src/post_processing.py 
b/backend/src/post_processing.py index 47fafebda..8b79f93bc 100644 --- a/backend/src/post_processing.py +++ b/backend/src/post_processing.py @@ -4,6 +4,11 @@ from langchain_neo4j import Neo4jGraph import os from src.shared.common_fn import load_embedding_model +from langchain_core.output_parsers import JsonOutputParser +from langchain_core.prompts import ChatPromptTemplate +from src.shared.constants import GRAPH_CLEANUP_PROMPT +from src.llm import get_llm +from src.main import get_labels_and_relationtypes DROP_INDEX_QUERY = "DROP INDEX entities IF EXISTS;" LABELS_QUERY = "CALL db.labels()" @@ -187,4 +192,61 @@ def update_embeddings(rows, graph): MATCH (e) WHERE elementId(e) = row.elementId CALL db.create.setNodeVectorProperty(e, "embedding", row.embedding) """ - return graph.query(query,params={'rows':rows}) \ No newline at end of file + return graph.query(query,params={'rows':rows}) + +def graph_schema_consolidation(graph): + nodes_and_relations = get_labels_and_relationtypes(graph) + logging.info(f"nodes_and_relations in existing graph : {nodes_and_relations}") + node_labels = [] + relation_labels = [] + + node_labels.extend(nodes_and_relations[0]['labels']) + relation_labels.extend(nodes_and_relations[0]['relationshipTypes']) + + parser = JsonOutputParser() + prompt = ChatPromptTemplate(messages=[("system",GRAPH_CLEANUP_PROMPT),("human", "{input}")], + partial_variables={"format_instructions": parser.get_format_instructions()}) + + graph_cleanup_model = os.getenv("GRAPH_CLEANUP_MODEL",'openai_gpt_4o') + llm, _ = get_llm(graph_cleanup_model) + chain = prompt | llm | parser + nodes_dict = chain.invoke({'input':node_labels}) + relation_dict = chain.invoke({'input':relation_labels}) + + node_match = {} + relation_match = {} + for new_label , values in nodes_dict.items() : + for old_label in values: + if new_label != old_label: + node_match[old_label]=new_label + + for new_label , values in relation_dict.items() : + for old_label in values: + if new_label != old_label: + relation_match[old_label]=new_label + + logging.info(f"updated node labels : {node_match}") + logging.info(f"updated relationship labels : {relation_match}") + + # Update node labels in graph + for old_label, new_label in node_match.items(): + query = f""" + MATCH (n:`{old_label}`) + SET n:`{new_label}` + REMOVE n:`{old_label}` + """ + graph.query(query) + + # Update relation types in graph + for old_label, new_label in relation_match.items(): + query = f""" + MATCH (n)-[r:`{old_label}`]->(m) + CREATE (n)-[r2:`{new_label}`]->(m) + DELETE r + """ + graph.query(query) + + return None + + + \ No newline at end of file diff --git a/backend/src/shared/constants.py b/backend/src/shared/constants.py index ef696de85..6a69d166d 100644 --- a/backend/src/shared/constants.py +++ b/backend/src/shared/constants.py @@ -831,45 +831,30 @@ DELETE_ENTITIES_AND_START_FROM_BEGINNING = "delete_entities_and_start_from_beginning" START_FROM_LAST_PROCESSED_POSITION = "start_from_last_processed_position" -PROMPT_TO_ALL_LLMs = """ -"# Knowledge Graph Instructions for LLMs\n" - "## 1. Overview\n" - "You are a top-tier algorithm designed for extracting information in structured " - "formats to build a knowledge graph.\n" - "Try to capture as much information from the text as possible without " - "sacrificing accuracy. 
Do not add any information that is not explicitly " - "mentioned in the text.\n" - "- **Nodes** represent entities and concepts.\n" - "- The aim is to achieve simplicity and clarity in the knowledge graph, making it\n" - "accessible for a vast audience.\n" - "## 2. Labeling Nodes\n" - "- **Consistency**: Ensure you use available types for node labels.\n" - "Ensure you use basic or elementary types for node labels.\n" - "- For example, when you identify an entity representing a person, " - "always label it as **'person'**. Avoid using more specific terms " - "like 'mathematician' or 'scientist'." - "- **Node IDs**: Never utilize integers as node IDs. Node IDs should be " - "names or human-readable identifiers found in the text.\n" - "- **Relationships** represent connections between entities or concepts.\n" - "Ensure consistency and generality in relationship types when constructing " - "knowledge graphs. Instead of using specific and momentary types " - "such as 'BECAME_PROFESSOR', use more general and timeless relationship types " - "like 'PROFESSOR'. Make sure to use general and timeless relationship types!\n" - "## 3. Coreference Resolution\n" - "- **Maintain Entity Consistency**: When extracting entities, it's vital to " - "ensure consistency.\n" - 'If an entity, such as "John Doe", is mentioned multiple times in the text ' - 'but is referred to by different names or pronouns (e.g., "Joe", "he"),' - "always use the most complete identifier for that entity throughout the " - 'knowledge graph. In this example, use "John Doe" as the entity ID.\n' - "Remember, the knowledge graph should be coherent and easily understandable, " - "so maintaining consistency in entity references is crucial.\n" - "## 4. Node Properties\n" - "- Dates, URLs, Time, and Numerical Values: Instead of creating separate nodes for - these elements, represent them as properties of existing nodes." - "- Example: Instead of creating a node labeled "2023-03-15" and connecting it to another node - with the relationship "BORN_ON", add a property called "born_on" to the person node with the - value "2023-03-15"." - "## 5. Strict Compliance\n" - "Adhere to the rules strictly. Non-compliance will result in termination." - """ +GRAPH_CLEANUP_PROMPT = """Please consolidate the following list of types into a smaller set of more general, semantically +related types. The consolidated types must be drawn from the original list; do not introduce new types. +Return a JSON object representing the mapping of original types to consolidated types. Every key is the consolidated type +and value is list of the original types that were merged into the consolidated type. Prioritize using the most generic and +repeated term when merging. If a type doesn't merge with any other type, it should still be included in the output, +mapped to itself. + +**Input:** A list of strings representing the types to be consolidated. These types may represent either node +labels or relationship labels Your algorithm should do appropriate groupings based on semantic similarity. 
+ +Example 1: +Input: +[ "Person", "Human", "People", "Company", "Organization", "Product"] +Output : +[Person": ["Person", "Human", "People"], Organization": ["Company", "Organization"], Product": ["Product"]] + +Example 2: +Input : +["CREATED_FOR", "CREATED_TO", "CREATED", "PLACE", "LOCATION", "VENUE"] +Output: +["CREATED": ["CREATED_FOR", "CREATED_TO", "CREATED"],"PLACE": ["PLACE", "LOCATION", "VENUE"]] +""" + +ADDITIONAL_INSTRUCTIONS = """Your goal is to identify and categorize entities while ensuring that specific data +types such as dates, numbers, revenues, and other non-entity information are not extracted as separate nodes. +Instead, treat these as properties associated with the relevant entities.""" + diff --git a/docs/frontend/frontend_docs.adoc b/docs/frontend/frontend_docs.adoc index 34e71f254..fac052739 100644 --- a/docs/frontend/frontend_docs.adoc +++ b/docs/frontend/frontend_docs.adoc @@ -16,17 +16,20 @@ This document provides a comprehensive guide for developers on how we build a Re == Folders . + ├── API + ├── Assets ├── Components | ├─ ChatBot + | | ├─ Chatbot | | ├─ ChatInfoModal - | | ├─ ChatModeToggle - | | ├─ ExpandedChatButtonContainer | | ├─ ChatModesSwitch + | | ├─ ChatModeToggle | | ├─ ChatOnlyComponent | | ├─ ChatInfo | | ├─ CommonChatActions | | ├─ CommunitiesInfo | | ├─ EntitiesInfo + | | ├─ ExpandedChatButtonContainer | | ├─ MetricsCheckbox | | ├─ MetricsTab | | ├─ MultiModeMetrics @@ -64,12 +67,15 @@ This document provides a comprehensive guide for developers on how we build a Re | ├─ UI | | ├─ Alert | | ├─ ButtonWithTooltip + | | |─ BreakDownPopOver | | ├─ CustomButton | | ├─ CustomCheckBox | | ├─ CustomMenu + | | ├─ CustomPopOver | | ├─ CustomProgressBar | | ├─ DatabaseIcon | | ├─ DatabaseStatusIcon + | | ├─ Dropdown | | ├─ ErrorBoundary | | ├─ FallBackDialog | | ├─ HoverableLink @@ -86,7 +92,6 @@ This document provides a comprehensive guide for developers on how we build a Re | | ├─ GenericSourceButton | | ├─ GenericSourceModal | ├─ Content - | ├─ Dropdown | ├─ FileTable | ├─ QuickStarter ├── HOC @@ -103,6 +108,9 @@ This document provides a comprehensive guide for developers on how we build a Re | ├─ UserCredentials | ├─ UserMessages | ├─ UserFiles + ├── HOC + | ├─ CustomModal + | ├─ WithVisibility ├── Hooks | ├─ useSourceInput | ├─ useSpeech @@ -136,72 +144,123 @@ Created a connection modal by adding details including protocol, URI, database n * If GDS Connection is there icon is scientific molecule > Graph enhancement model > Post processing jobs > gives user the leverage to check and uncheck the communities checkbox. * If AURA DB > icon is database icon > Graph enhancement model > Post processing jobs > communities checkbox is disabled. -* Before Connection : - image::images/ConnectionModal.jpg[NoConnection, 600] - * After connection: + * ** Aura DS Connection ** + +image::images/GraphDBConnection.jpg[Connection, 600] + + * ** Aura DB connection ** -image::images/NoFiles.jpg[Connection, 600] +image::images/AuraDBConnection.jpg[Connection, 600] + + * **ReadOnly User** + +image::images/ReadOnlyUser.jpg[ReadOnlyUser, 600] + + * **User not connected** + +image::images/NoConnection.jpg[User not Connection, 600] == 3. File Source integration: Implemented various file source integrations including drag-and-drop, web sources search that includes YouTube video, Wikipedia link, Amazon S3 file access, and Google Cloud Storage (GCS) file access. This allows users to upload PDF files from local storage or directly from the integrated sources. 
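Before the per-endpoint notes that follow, here is a minimal client-side sketch (not part of the frontend code) of how one of these sources can be registered through the scan endpoint described below. The base URL, credentials, and video URL are placeholders; the form-field names are taken from the API reference later in this document:

[source,python]
----
# Minimal sketch: register a YouTube video as a new Document source via /url/scan.
# Placeholders: base URL, Neo4j credentials, and the video URL.
import requests

BASE_URL = "http://localhost:8000"  # assumed local backend
connection = {
    "uri": "neo4j+s://<your-instance>.databases.neo4j.io",
    "userName": "neo4j",
    "password": "<password>",
    "database": "neo4j",
}

response = requests.post(
    f"{BASE_URL}/url/scan",
    data={
        **connection,
        "model": "openai_gpt_4o",
        "source_url": "https://www.youtube.com/watch?v=<video-id>",
        "source_type": "youtube",
    },
)
print(response.json())  # the new source should then be listed by /sources_list with status 'New'
----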
The Api’s are as follows: -* ***/source_list:*** - ** to fetch the list of files in the DB +* ***/source_list:*** to fetch the list of files in the DB -image::images/WithFiles.jpg[Connected, 600] +image::images/WithData.jpg[Connected, 600] -* ***/upload:*** - ** to upload files from Local +* ***/upload:*** to upload files from Local image::images/UploadLocalFile.jpg[Local File, 600] - - - ** status 'Uploading' while file is get uploaded. - -image::images/UploadingStatus.jpg[Upload Status, 600] - -* ***/url/scan:*** - ** to scan the link or sources of YouTube, Wikipedia, and Web Sources +* ***/url/scan:*** to scan the link or sources of YouTube, Wikipedia, and Web Sources image::images/WebSources.jpg[WebSources, 600] -* ***/url/scan:*** - ** to scan the files of S3 and GCS. - *** Add the respective Bucket URL, access key and secret key to access ***S3 files***. +* ***/url/scan:*** to scan the files of S3 and GCS. + +1) Add the respective Bucket URL, access key and secret key to access S3 files. image::images/S3BucketScan.jpg[S3 scan, 600] - - **** Add the respective Project ID, Bucket name, and folder to access ***GCS files***. User gets a redirect to the authentication page to authenticate their google account. + +2) Add the respective Project ID, Bucket name, and folder to access GCS files. image::images/GCSbucketFiles.jpg[GCS scan, 600] +3) User gets a redirect to the authentication page to authenticate their google account. + image::images/Gcloud_auth.jpg[auth login scan, 600] == 4. File Source Extraction: -* ***/extract*** - ** to fetch the number of nodes and relationships created. +* ***/extract*** to fetch the number of nodes and relationships created. *** During Extraction the selected files or all files in ‘New’ state go into ‘Processing’ state and then ‘Completed’ state if there are no failures. image::images/GenerateGraph.jpg[Generate Graph, 600] +1) A file with status Completed has an option to be Reprocess with the following options : + +image::images/CompletedReadyToReprocess.jpg[CompletedReadyToReprocess, 600] + +2) A file with status Failed/ Cancelled has an option to be Reprocess with the following options : + +image::images/FailedReadyToReprocess.jpg[FailedReadyToReprocess, 600] == 5. Graph Generation: -* Created a component for generating graphs based on the files in the table, to extract nodes and relationships. When the user clicks on the Preview Graph or on the Table View icon the user can see that the graph model holds three options for viewing: Lexical Graph, Entity Graph and Knowledge Graph. We utilized Neo4j's graph library to visualize the extracted nodes and relationships in the form of a graph query API: ***/graph_query***. There are options for customizing the graph visualization such as layout algorithms [zoom in, zoom out, fit, refresh], node styling, relationship types. -image::images/KnowledgeGraph.jpg[Knowledge Graph, 600] -image::images/EntityGraph.jpg[Entity Graph, 600] -image::images/EntityGraph.jpg[Entity Graph, 600] +* ***/graph_query:*** + *** Created a component for generating graphs based on the files in the table, to extract nodes and relationships. When the user clicks on the Preview Graph or on the Table View icon the user can see that the graph model holds three options for viewing: Lexical Graph, Entity Graph and Knowledge Graph. We utilized Neo4j's graph library to visualize the extracted nodes and relationships in the form of a graph query API: ***/graph_query***. 
There are options for customizing the graph visualization such as layout algorithms [zoom in, zoom out, fit, refresh], node styling, relationship types. + + * **Preview Graph** + +image::images/AllFilesGraph.jpg[AllFiles Graph, 600] + + * **File Graph** + +image::images/SingleFileQuery.jpg[Single File Graph, 600] + + * **Graph Types** + +1) Document & Chunk + +image::images/DocChunkGraph.jpg[Knowledge Graph, 600] + +2) Entities + +image::images/EntitiesGraph.jpg[Entity Graph, 600] + +3) Communities + +image::images/CommunitiesGraph.jpg[Community Graph, 600] + +* ***/get_neighbours:*** + ** This API is used to retrive the neighbor nodes of the given element id of the node. + +image::images/NeighbourNodeDisconnected.jpg[Neighbourhood Graph, 600] == 6. Chatbot: -* Created a Chatbot Component which has state variables to manage user input and chat messages. Once the user asks the question and clicks on the Ask button API: ***/chatbot*** is triggered to send user input to the backend and receive the response. The chat also has options for users to see more details about the chat, text to speech and copy the response. +Created a Chatbot Component which has state variables to manage user input and chat messages. Once the user asks the question and clicks on the Ask button API: ***/chatbot*** is triggered to send user input to the backend and receive the response. The chat also has options for users to see more details about the chat, text to speech and copy the response. + + * **Chat Drawer View** + +image::images/ChatBotSideView.jpg[ChatBotSideView, 600] + + * **Chat Modal View** + +image::images/ChatBotModalView.jpg[ChatBotModalView, 600] + + * **Chat Pop out View** + +image::images/ChatBotNewURL.jpg[ChatBotNewURL, 600] + + +* ***/clear_chat_bot:*** + ** to clear the chat history which is saved in Neo4j DB. + +image::images/ClearChatHistory.jpg[ClearChatHistory, 600] -image::images/ChatResponse.jpg[ChatResponse, 600] * ***/chunk_entities:*** @@ -209,7 +268,7 @@ image::images/ChatResponse.jpg[ChatResponse, 600] ***Sources*** -image::images/ChatInfoModal.jpg[ChatInfoModal, 600] +image::images/Sources.jpg[Sources, 600] ***Entities*** @@ -219,30 +278,43 @@ image::images/EntitiesInfo.jpg[EntitiesInfo, 600] image::images/ChunksInfo.jpg[ChunksInfo, 600] -* There are three modes ***Vector***, ***Graph***, ***Graph+Vector*** that can be provided to the chat to retrieve the answers. +* ***/metric:*** + ** The API responsible for a evaluating chatbot responses on the basis of different metrics such as faithfulness and answer relevancy. This utilises RAGAS library to calculate these metrics. + +image::images/MetricEval.jpg[MetricEval, 600] + +* ***/additional_metrics:*** + ** The API responsible for a evaluating chatbot responses on the basis of different metrics such as context entity recall, semantic score, rouge score. This reuqire additional ground truth to be supplied by user. This utilises RAGAS library to calculate these metrics. + +image::images/AdditionalMetricEval.jpg[AdditionalMetricEval, 600] -image::images/ChatModes.jpg[ChatModes, 600] - • In Vector mode, we only get the sources and chunks . +***Chat Modes*** -image::images/VectorMode.jpg[VectorMode, 600] +* There are five modes ***Vector***, ***Fulltext***, ***Graph+Vector+Fulltext***, ***Entity search+Vector***, ***Graph+Vector+Fulltext*** that can be provided to the chat to retrieve the answers in ***Production*** environment. 
+* There is one more mode ***Graph*** that can be provided to the chat to retrieve the answers in ***Development*** environment. +* There is one more mode ***Global search+Vector+Fulltext*** that can be provided to the chat to retrieve the answers if aura instance is ***GDS***. - • Graph Mode: Cypher query and Entities [DEV] +1) In Production Environment -image::images/GraphModeDetails.png[GraphMode, 600] -image::images/GraphModeQuery.png[GraphMode, 600] +image::images/ChatModesProd.jpg[ChatModesProd, 600] - • Graph+Vector Mode: Sources, Chunks and Entities +2) In Development Environment -image::images/GraphVectorMode.jpg[GraphVectorMode, 600] +image::images/ChatModesDev.jpg[ChatModesDev, 600] -== 6. Graph Enhancement Settings: + +== 7. Graph Enhancement Settings: Users can now set their own Schema for nodes and relations or can already be an existing schema. - + +* ***Entity Extraction Settings:*** + +image::images/GraphEnhancements.jpg[GraphEnhancements, 600] + * ***/schema:*** ** to fetch the existing schema that already exists in the db. -image::images/PredefinedSchema.jpg[PredefinedSchema, 600] +image::images/Schema.jpg[PredefinedSchema, 600] * ***/populate_graph_schema:*** ** to fetch the schema from user entered document text @@ -254,7 +326,28 @@ image::images/UserDefinedSchema.jpg[UserDefinedSchema, 600] image::images/DeleteOrphanNodes.jpg[DeleteOrphanNodes, 600] -== 7. Settings: +* ***/merge_duplicate_nodes:*** + +1) to merge the duplicate entities. + +image::images/MergeDuplicateEntities.jpg[MergeDuplicateEntities, 600] + +2) to get duplicate entities + +image::images/GetDuplicateNodes.jpg[GetDuplicateNodes, 600] + +* ***/post_processing :*** + to fine-tune the knowledge graph for improved performance and deeper analysis + +1) When GDS instance + +image::images/PostProcessingDB.jpg[PostProcessingDB, 600] + +2) When Aura DB instance + +image::images/PostProcessingDB.jpg[PostProcessingDB, 600] + +== 8. Application Options: * ***LLM Model*** @@ -262,26 +355,59 @@ User can select desired LLM models image::images/Dropdown.jpg[Dropdown, 600] -* ***Dark/Light Mode*** +* ***Documentation***: User can navigate to the application overview : https://neo4j.com/labs/genai-ecosystem/llm-graph-builder/ + +image::images/LLMGraphBuilderDocumentation.jpg[LLMGraphBuilderDocumentation, 600] + +* ***GitHub Issues***: User can navigate to the gitHub issues which are in developers bucket list : https://github.com/neo4j-labs/llm-graph-builder/issues + +image::images/GitHubIssues.jpg[GitHubIssues, 600] + -User can choose the application view : both in dark and light mode +* ***Dark/Light Mode***: User can choose the application view : both in dark and light mode + +1) Dark image::images/DarkMode.jpg[DarkMode, 600] +2) Light image::images/LightMode.jpg[LightMode, 600] -* ***Delete Files*** +* ***Chat Only Mode*** -User can delete all number/selected files from the table. +User can also use the chat only feature by navigating to the url at: https://llm-graph-builder.neo4jlabs.com/chat-only to ask questions related to documents which have been completely processed. User is required to pass the login credentials to connect to the database. -image::images/DeleteFiles.jpg[DeleteFiles, 600] +== 9. File Table Options: +User can explore various features available for files in the table, including sorting, filtering, viewing as a graph, examining nodes and relationships, copying file details, and accessing chunks related to the file. 
-* ***Chat Only Mode*** +***File Status*** + +image::images/FileStatus.jpg[FileStatus, 600] + +***File Nodes*** + +image::images/FileNodes.jpg[FileNodes, 600] + +***File Relationships*** + +image::images/FileRelationships.jpg[FileRelationships, 600] + +***File Actions*** -User can also use the chat only feature by navigating to the url https://dev-frontend-dcavk67s4a-uc.a.run.app/chat-only to ask questions related to documents which have been completely processed. User is required to pass the login credentials to connect to the database. +** ***Graph View*** -== 8. Interface Design: +image::images/GraphActions.jpg[GraphActions, 600] + + ** ***Copy File Data*** + +image::images/CopyFileData.jpg[CopyFileData, 600] + + ** ***Text Chunks*** + +image::images/TextChunks.jpg[TextChunks, 600] + +== 10. Interface Design: Designed a user-friendly interface that guides users through the process of connecting to Neo4j Aura, accessing file sources, uploading PDF files, and generating graphs. * ***Components:*** @neo4j-ndl/react @@ -290,7 +416,7 @@ Designed a user-friendly interface that guides users through the process of conn * ***NVL:*** @neo4j-nvl/core * ***CSS:*** Inline styling, tailwind CSS -== 9. Deployment: +== 11. Deployment: Followed best practices for optimizing performance and security of the deployed application. * ***Local Deployment:*** @@ -303,21 +429,19 @@ Followed best practices for optimizing performance and security of the deployed [source,indent=0] ---- - * LLM_MODELS="diffbot,openai-gpt-3.5,openai-gpt-4o" - * REACT_APP_SOURCES="local,youtube,wiki,s3,gcs,web" - * GOOGLE_CLIENT_ID="xxxx" [For Google GCS integration] - * CHAT_MODES="vector,graph+vector" - * CHUNK_SIZE=5242880 - * TIME_PER_BYTE=2 - * TIME_PER_PAGE=50 - * TIME_PER_CHUNK=4 - * LARGE_FILE_SIZE=5242880 - * ENV="PROD"/ ‘DEV’ - * NEO4J_USER_AGENT="LLM-Graph-Builder/v0.2-dev" - * BACKEND_API_URL= - * BLOOM_URL= - * NPM_TOKEN= - * BACKEND_PROCESSING_URL= + * VITE_LLM_MODELS="" + * VITE_REACT_APP_SOURCES="" + * VITE_GOOGLE_CLIENT_ID="xxxx" [For Google GCS integration] + * VITE_CHAT_MODES="" + * VITE_CHUNK_SIZE=5242880 + * VITE_TIME_PER_PAGE=50 + * VITE_LARGE_FILE_SIZE=5242880 + * VITE_ENV="PROD"/ ‘DEV’ + * VITE_BACKEND_API_URL= + * VITE_BLOOM_URL= + * VITE_BACKEND_PROCESSING_URL= + * VITE_LLM_MODELS_PROD="openai_gpt_4o,openai_gpt_4o_mini,diffbot,gemini_1.5_flash" + * VITE_BATCH_SIZE=2 ---- * ***Cloud Deployment:*** ** To deploy the app install the gcloud cli , run the following command in the terminal specifically from frontend root folder. @@ -327,7 +451,9 @@ Followed best practices for optimizing performance and security of the deployed *** Allow unauthenticated request : Yes -== 10. API Reference +== 12. API Reference + +=== 1) Connection Modal ----- POST /connect ----- @@ -341,7 +467,15 @@ Neo4j database connection on frontend is done with this API. * `password`= Neo4j db password, * `database`= Neo4j database name -=== Upload Files from Local +=== 2) Backend Database connection +---- +POST /backend_connection_configuation +---- + +The API responsible for create the connection obj from Neo4j DB based on environment variable and return the status for show/hide login dialog on UI + + +=== 3) Upload Files from Local ---- POST /upload ---- @@ -361,7 +495,7 @@ The upload endpoint is designed to handle the uploading of large files by breaki * `database`= Neo4j database name -=== User Defined Schema +=== 4) User Defined Schema ---- POST /schema ---- @@ -375,7 +509,7 @@ User can set schema for graph generation (i.e. 
Nodes and relationship labels) in * `password`= Neo4j db password, * `database`= Neo4j database name -=== Graph schema from Input Text +=== 5) Graph schema from Input Text ---- POST /populate_graph_schema ---- @@ -388,7 +522,7 @@ The API is used to populate a graph schema based on the provided input text, mod * `model`=The model to be used for populating the graph schema. * `is_schema_description_checked`=A flag indicating whether the schema description should be considered. -=== Unstructured Sources +=== 6) Unstructured Sources ---- POST /url/scan ---- @@ -414,7 +548,7 @@ Create Document node for other sources - s3 bucket, gcs bucket, wikipedia, youtu * `access_token`=Form(None) -=== Extration of Nodes and Relations from Data +=== 7) Extration of Nodes and Relations from Data ---- POST /extract ---- @@ -456,7 +590,7 @@ allowedNodes=Node labels passed from settings panel, * `allowedRelationship`=Relationship labels passed from settings panel, * `language`=Language in which wikipedia content will be extracted -=== Get list of sources +=== 8) Get list of sources ---- GET /sources_list ---- @@ -471,7 +605,7 @@ List all sources (Document nodes) present in Neo4j graph database. * `database`= Neo4j database name -=== Post processing after graph generation +=== 9) Post processing after graph generation ---- POST /post_processing : ---- @@ -486,7 +620,7 @@ This API is called at the end of processing of whole document to get create k-ne * `database`= Neo4j database name * `tasks`= List of tasks to perform -=== Chat with Data +=== 10) Chat with Data ---- POST /chat_bot ---- @@ -509,7 +643,7 @@ The API responsible for a chatbot system designed to leverage multiple AI models * `question`= User query for the chatbot * `session_id`= Session ID used to maintain the history of chats during the user's connection -=== Get entities from chunks +=== 11) Get entities from chunks ---- POST/chunk_entities ---- @@ -525,7 +659,7 @@ This API is used to get the entities and relations associated with a particular * `chunk_ids` = Chunk ids of document -=== Clear chat history +=== 12) Clear chat history ---- POST /clear_chat_bot ---- @@ -540,7 +674,7 @@ This API is used to clear the chat history which is saved in Neo4j DB. * `database`= Neo4j database name, * `session_id` = User session id for QA chat -=== View graph for a file +=== 13) View graph for a file ---- POST /graph_query ---- @@ -555,7 +689,22 @@ This API is used to view graph for a particular file. * `query_type`= Neo4j database name * `document_names` = File name for which user wants to view graph -=== SSE event to update processing status +=== 14) Get neighbour nodes +---- +POST /get_neighbours +---- + +This API is used to retrive the neighbor nodes of the given element id of the node. + +**API Parameters :** + +* `uri`=Neo4j uri, +* `userName`= Neo4j db username, +* `password`= Neo4j db password, +* `database`= Neo4j database name, +* `elementId` = Element id of the node to retrive its neighbours + +=== 15) SSE event to update processing status ---- GET /update_extract_status ---- @@ -584,7 +733,7 @@ The API gives the extraction status of a specified file. It uses Server-Sent Eve * `password`= Neo4j db password, * `database`= Neo4j database name -=== Delete selected documents +=== 16) Delete selected documents ---- POST /delete_document_and_entities ---- @@ -601,7 +750,7 @@ Deleteion of nodes and relations for multiple files is done through this API. 
Us * `source_types`= Document sources (Wikipedia, youtube, etc.), * `deleteEntities`= Boolean value indicating whether entity deletion is requested or not -=== Cancel processing job +=== 17) Cancel processing job ---- POST /cancelled_job ---- This API is responsible for cancelling an in-process job. **API Parameters :** * `uri`=Neo4j uri, * `userName`= Neo4j db username, * `password`= Neo4j db password, * `database`= Neo4j database name, * `filenames`= Name of the file whose processing needs to be stopped, * `source_types`= Source of the file -=== Deletion of orpahn nodes +=== 18) Deletion of orphan nodes ---- POST /delete_unconnected_nodes ---- The API is used to delete unconnected entities from the database. **API Parameters :** * `uri`=Neo4j uri, * `userName`= Neo4j db username, * `password`= Neo4j db password, * `database`= Neo4j database name, * `unconnected_entities_list`= selected list of unconnected entities to delete. +=== 19) Get the list of orphan nodes +---- +POST /get_unconnected_nodes_list +---- + +The API retrieves a list of nodes in the graph database that are not connected to any other nodes. + +**API Parameters :** + +* `uri`=Neo4j uri, +* `userName`= Neo4j db username, +* `password`= Neo4j db password, +* `database`= Neo4j database name + +=== 20) Get duplicate nodes +---- +POST /get_duplicate_nodes +---- + +The API is used to fetch duplicate entities from the database. + +**API Parameters :** + +* `uri`=Neo4j uri, +* `userName`= Neo4j db username, +* `password`= Neo4j db password, +* `database`= Neo4j database name, + + +=== 21) Merge duplicate nodes +---- +POST /merge_duplicate_nodes +---- + +The API is used to merge the duplicate entities selected by the user in the database. + +**API Parameters :** + +* `uri`=Neo4j uri, +* `userName`= Neo4j db username, +* `password`= Neo4j db password, +* `database`= Neo4j database name, +* `duplicate_nodes_list`= selected list of entities to merge with their similar entities. + +=== 22) Drop and create vector index +---- +POST /drop_create_vector_index +---- + +The API is used to drop and create the vector index when the vector index dimensions are different. + +**API Parameters :** + +* `uri`=Neo4j uri, +* `userName`= Neo4j db username, +* `password`= Neo4j db password, +* `database`= Neo4j database name, +* `isVectorIndexExist`= True or False based on whether the vector index exists in the database, + +=== 23) Reprocessing of sources +---- +POST /retry_processing +---- + +This API is used to mark cancelled, completed or failed file sources as 'Ready to Reprocess'. +Users have 3 options for reprocessing files: + +* Start from beginning - In this condition the file will be processed from the beginning, i.e. from the 1st chunk again. +* Delete entities and start from beginning - If the file source has already been processed and has any existing nodes and relations, those will be deleted and the file will be reprocessed from the 1st chunk. +* Start from last processed position - Cancelled or failed files will be processed from the last successfully processed chunk position. This option is not available for completed files. + +Once the status is set to 'Ready to Reprocess', the user can again click on Generate graph to process the file for knowledge graph creation. + +**API Parameters :** + +* `uri`=Neo4j uri, +* `userName`= Neo4j db username, +* `password`= Neo4j db password, +* `database`= Neo4j database name, +* `file_name`= Name of the file which the user wants to reprocess. +* `retry_condition` = One of the above 3 conditions which is selected for reprocessing. + -== 11. Conclusion: +== 13. Conclusion: In conclusion, this technical document outlines the process of building a React application with Neo4j Aura integration for graph database functionalities. -== 12. Referral Links: +== 14. 
Referral Links: * Dev env : https://dev-frontend-dcavk67s4a-uc.a.run.app/ * Staging env: https://staging-frontend-dcavk67s4a-uc.a.run.app/ * Prod env: https://prod-frontend-dcavk67s4a-uc.a.run.app/ diff --git a/docs/frontend/images/AdditionalMetricEval.jpg b/docs/frontend/images/AdditionalMetricEval.jpg new file mode 100644 index 000000000..2cdae6bec Binary files /dev/null and b/docs/frontend/images/AdditionalMetricEval.jpg differ diff --git a/docs/frontend/images/AllFilesGraph.jpg b/docs/frontend/images/AllFilesGraph.jpg new file mode 100644 index 000000000..21b0f9429 Binary files /dev/null and b/docs/frontend/images/AllFilesGraph.jpg differ diff --git a/docs/frontend/images/AuraDBConnection.jpg b/docs/frontend/images/AuraDBConnection.jpg new file mode 100644 index 000000000..786725a6d Binary files /dev/null and b/docs/frontend/images/AuraDBConnection.jpg differ diff --git a/docs/frontend/images/ChatBotModalView.jpg b/docs/frontend/images/ChatBotModalView.jpg new file mode 100644 index 000000000..9a7a44779 Binary files /dev/null and b/docs/frontend/images/ChatBotModalView.jpg differ diff --git a/docs/frontend/images/ChatBotNewURL.jpg b/docs/frontend/images/ChatBotNewURL.jpg new file mode 100644 index 000000000..63e4cb226 Binary files /dev/null and b/docs/frontend/images/ChatBotNewURL.jpg differ diff --git a/docs/frontend/images/ChatBotSideView.jpg b/docs/frontend/images/ChatBotSideView.jpg new file mode 100644 index 000000000..76c534883 Binary files /dev/null and b/docs/frontend/images/ChatBotSideView.jpg differ diff --git a/docs/frontend/images/ChatInfoModal.jpg b/docs/frontend/images/ChatInfoModal.jpg deleted file mode 100644 index 72c119800..000000000 Binary files a/docs/frontend/images/ChatInfoModal.jpg and /dev/null differ diff --git a/docs/frontend/images/ChatModes.jpg b/docs/frontend/images/ChatModes.jpg deleted file mode 100644 index 1dd835e24..000000000 Binary files a/docs/frontend/images/ChatModes.jpg and /dev/null differ diff --git a/docs/frontend/images/ChatModesDev.jpg b/docs/frontend/images/ChatModesDev.jpg new file mode 100644 index 000000000..204903695 Binary files /dev/null and b/docs/frontend/images/ChatModesDev.jpg differ diff --git a/docs/frontend/images/ChatModesProd.jpg b/docs/frontend/images/ChatModesProd.jpg new file mode 100644 index 000000000..90b5c4215 Binary files /dev/null and b/docs/frontend/images/ChatModesProd.jpg differ diff --git a/docs/frontend/images/ChatResponse.jpg b/docs/frontend/images/ChatResponse.jpg deleted file mode 100644 index 72c119800..000000000 Binary files a/docs/frontend/images/ChatResponse.jpg and /dev/null differ diff --git a/docs/frontend/images/ClearChatHistory.jpg b/docs/frontend/images/ClearChatHistory.jpg new file mode 100644 index 000000000..7db7b7e6a Binary files /dev/null and b/docs/frontend/images/ClearChatHistory.jpg differ diff --git a/docs/frontend/images/CommunitiesGraph.jpg b/docs/frontend/images/CommunitiesGraph.jpg new file mode 100644 index 000000000..ab939a611 Binary files /dev/null and b/docs/frontend/images/CommunitiesGraph.jpg differ diff --git a/docs/frontend/images/CompletedReadyToReprocess.jpg b/docs/frontend/images/CompletedReadyToReprocess.jpg new file mode 100644 index 000000000..3541df6ec Binary files /dev/null and b/docs/frontend/images/CompletedReadyToReprocess.jpg differ diff --git a/docs/frontend/images/CopyFileData.jpg b/docs/frontend/images/CopyFileData.jpg new file mode 100644 index 000000000..6356e633f Binary files /dev/null and b/docs/frontend/images/CopyFileData.jpg differ diff --git 
a/docs/frontend/images/DeleteOrphanNodes.jpg b/docs/frontend/images/DeleteOrphanNodes.jpg index e397cb4a7..5cf0e4cf3 100644 Binary files a/docs/frontend/images/DeleteOrphanNodes.jpg and b/docs/frontend/images/DeleteOrphanNodes.jpg differ diff --git a/docs/frontend/images/DocChunkGraph.jpg b/docs/frontend/images/DocChunkGraph.jpg new file mode 100644 index 000000000..fe74f5d93 Binary files /dev/null and b/docs/frontend/images/DocChunkGraph.jpg differ diff --git a/docs/frontend/images/DownLoadConversation.jpg b/docs/frontend/images/DownLoadConversation.jpg new file mode 100644 index 000000000..856ab85b9 Binary files /dev/null and b/docs/frontend/images/DownLoadConversation.jpg differ diff --git a/docs/frontend/images/EntitiesGraph.jpg b/docs/frontend/images/EntitiesGraph.jpg new file mode 100644 index 000000000..8fff408c0 Binary files /dev/null and b/docs/frontend/images/EntitiesGraph.jpg differ diff --git a/docs/frontend/images/EntityExtraction.jpg b/docs/frontend/images/EntityExtraction.jpg new file mode 100644 index 000000000..94ab74ff1 Binary files /dev/null and b/docs/frontend/images/EntityExtraction.jpg differ diff --git a/docs/frontend/images/EntityGraph.jpg b/docs/frontend/images/EntityGraph.jpg deleted file mode 100644 index 9e25473a9..000000000 Binary files a/docs/frontend/images/EntityGraph.jpg and /dev/null differ diff --git a/docs/frontend/images/FailedReadyToReprocess.jpg b/docs/frontend/images/FailedReadyToReprocess.jpg new file mode 100644 index 000000000..6a1940d8e Binary files /dev/null and b/docs/frontend/images/FailedReadyToReprocess.jpg differ diff --git a/docs/frontend/images/FileNodes.jpg b/docs/frontend/images/FileNodes.jpg new file mode 100644 index 000000000..aaae8f70b Binary files /dev/null and b/docs/frontend/images/FileNodes.jpg differ diff --git a/docs/frontend/images/FileRelationships.jpg b/docs/frontend/images/FileRelationships.jpg new file mode 100644 index 000000000..70de2462d Binary files /dev/null and b/docs/frontend/images/FileRelationships.jpg differ diff --git a/docs/frontend/images/FileStatus.jpg b/docs/frontend/images/FileStatus.jpg new file mode 100644 index 000000000..84b8bc185 Binary files /dev/null and b/docs/frontend/images/FileStatus.jpg differ diff --git a/docs/frontend/images/GEDeleteOrphanNodes.jpg b/docs/frontend/images/GEDeleteOrphanNodes.jpg deleted file mode 100644 index 203aacc57..000000000 Binary files a/docs/frontend/images/GEDeleteOrphanNodes.jpg and /dev/null differ diff --git a/docs/frontend/images/GenerateGraph.jpg b/docs/frontend/images/GenerateGraph.jpg index cc006d969..87dcc7959 100644 Binary files a/docs/frontend/images/GenerateGraph.jpg and b/docs/frontend/images/GenerateGraph.jpg differ diff --git a/docs/frontend/images/GetDuplicateNodes.jpg b/docs/frontend/images/GetDuplicateNodes.jpg new file mode 100644 index 000000000..3f26d8096 Binary files /dev/null and b/docs/frontend/images/GetDuplicateNodes.jpg differ diff --git a/docs/frontend/images/GitHubIssues.jpg b/docs/frontend/images/GitHubIssues.jpg new file mode 100644 index 000000000..18af83465 Binary files /dev/null and b/docs/frontend/images/GitHubIssues.jpg differ diff --git a/docs/frontend/images/GraphActions.jpg b/docs/frontend/images/GraphActions.jpg new file mode 100644 index 000000000..b221aa2b6 Binary files /dev/null and b/docs/frontend/images/GraphActions.jpg differ diff --git a/docs/frontend/images/GraphDBConnection.jpg b/docs/frontend/images/GraphDBConnection.jpg new file mode 100644 index 000000000..cc7684950 Binary files /dev/null and 
b/docs/frontend/images/GraphDBConnection.jpg differ diff --git a/docs/frontend/images/GraphEnhacements.jpg b/docs/frontend/images/GraphEnhacements.jpg deleted file mode 100644 index 8fb3d4fe2..000000000 Binary files a/docs/frontend/images/GraphEnhacements.jpg and /dev/null differ diff --git a/docs/frontend/images/GraphEnhancements.jpg b/docs/frontend/images/GraphEnhancements.jpg new file mode 100644 index 000000000..d5208ea0b Binary files /dev/null and b/docs/frontend/images/GraphEnhancements.jpg differ diff --git a/docs/frontend/images/GraphModeDetails.png b/docs/frontend/images/GraphModeDetails.png deleted file mode 100644 index d11e7dcd1..000000000 Binary files a/docs/frontend/images/GraphModeDetails.png and /dev/null differ diff --git a/docs/frontend/images/GraphModeQuery.png b/docs/frontend/images/GraphModeQuery.png deleted file mode 100644 index cfd7fbaf8..000000000 Binary files a/docs/frontend/images/GraphModeQuery.png and /dev/null differ diff --git a/docs/frontend/images/GraphVectorMode.jpg b/docs/frontend/images/GraphVectorMode.jpg deleted file mode 100644 index d378b860f..000000000 Binary files a/docs/frontend/images/GraphVectorMode.jpg and /dev/null differ diff --git a/docs/frontend/images/KnowledgeGraph.jpg b/docs/frontend/images/KnowledgeGraph.jpg deleted file mode 100644 index eeb20a627..000000000 Binary files a/docs/frontend/images/KnowledgeGraph.jpg and /dev/null differ diff --git a/docs/frontend/images/LLMGraphBuilderDocumentation.jpg b/docs/frontend/images/LLMGraphBuilderDocumentation.jpg new file mode 100644 index 000000000..5df7aa739 Binary files /dev/null and b/docs/frontend/images/LLMGraphBuilderDocumentation.jpg differ diff --git a/docs/frontend/images/LexicalGraph.jpg b/docs/frontend/images/LexicalGraph.jpg deleted file mode 100644 index 7de1543ac..000000000 Binary files a/docs/frontend/images/LexicalGraph.jpg and /dev/null differ diff --git a/docs/frontend/images/MergeDuplicateEntities.jpg b/docs/frontend/images/MergeDuplicateEntities.jpg new file mode 100644 index 000000000..2c22f07ac Binary files /dev/null and b/docs/frontend/images/MergeDuplicateEntities.jpg differ diff --git a/docs/frontend/images/MetricEval.jpg b/docs/frontend/images/MetricEval.jpg new file mode 100644 index 000000000..c1d33e83e Binary files /dev/null and b/docs/frontend/images/MetricEval.jpg differ diff --git a/docs/frontend/images/NeighbourNodeDisconnected.jpg b/docs/frontend/images/NeighbourNodeDisconnected.jpg new file mode 100644 index 000000000..829ac0a34 Binary files /dev/null and b/docs/frontend/images/NeighbourNodeDisconnected.jpg differ diff --git a/docs/frontend/images/NoConnection.jpg b/docs/frontend/images/NoConnection.jpg new file mode 100644 index 000000000..769ef542b Binary files /dev/null and b/docs/frontend/images/NoConnection.jpg differ diff --git a/docs/frontend/images/NoFiles.jpg b/docs/frontend/images/NoFiles.jpg deleted file mode 100644 index 7494026a4..000000000 Binary files a/docs/frontend/images/NoFiles.jpg and /dev/null differ diff --git a/docs/frontend/images/PostProcessingDB.jpg b/docs/frontend/images/PostProcessingDB.jpg new file mode 100644 index 000000000..63ebf0f5a Binary files /dev/null and b/docs/frontend/images/PostProcessingDB.jpg differ diff --git a/docs/frontend/images/PostProcessingGDS.jpg b/docs/frontend/images/PostProcessingGDS.jpg new file mode 100644 index 000000000..4498055d7 Binary files /dev/null and b/docs/frontend/images/PostProcessingGDS.jpg differ diff --git a/docs/frontend/images/PredefinedSchema.jpg 
b/docs/frontend/images/PredefinedSchema.jpg index 6b89ab137..4706af5d3 100644 Binary files a/docs/frontend/images/PredefinedSchema.jpg and b/docs/frontend/images/PredefinedSchema.jpg differ diff --git a/docs/frontend/images/ReadOnlyUser.jpg b/docs/frontend/images/ReadOnlyUser.jpg new file mode 100644 index 000000000..64e7e424f Binary files /dev/null and b/docs/frontend/images/ReadOnlyUser.jpg differ diff --git a/docs/frontend/images/Schema.jpg b/docs/frontend/images/Schema.jpg new file mode 100644 index 000000000..a1ce36d32 Binary files /dev/null and b/docs/frontend/images/Schema.jpg differ diff --git a/docs/frontend/images/SingleFileQuery.jpg b/docs/frontend/images/SingleFileQuery.jpg new file mode 100644 index 000000000..b817767c1 Binary files /dev/null and b/docs/frontend/images/SingleFileQuery.jpg differ diff --git a/docs/frontend/images/Sources.jpg b/docs/frontend/images/Sources.jpg new file mode 100644 index 000000000..d721b7481 Binary files /dev/null and b/docs/frontend/images/Sources.jpg differ diff --git a/docs/frontend/images/SourcesInfo.jpg b/docs/frontend/images/SourcesInfo.jpg deleted file mode 100644 index 80c8cfded..000000000 Binary files a/docs/frontend/images/SourcesInfo.jpg and /dev/null differ diff --git a/docs/frontend/images/TextChunks.jpg b/docs/frontend/images/TextChunks.jpg new file mode 100644 index 000000000..96e5f485c Binary files /dev/null and b/docs/frontend/images/TextChunks.jpg differ diff --git a/docs/frontend/images/UploadingStatus.jpg b/docs/frontend/images/UploadingStatus.jpg deleted file mode 100644 index 779daf239..000000000 Binary files a/docs/frontend/images/UploadingStatus.jpg and /dev/null differ diff --git a/docs/frontend/images/VectorMode.jpg b/docs/frontend/images/VectorMode.jpg deleted file mode 100644 index f0ebf4e37..000000000 Binary files a/docs/frontend/images/VectorMode.jpg and /dev/null differ diff --git a/docs/frontend/images/WithData.jpg b/docs/frontend/images/WithData.jpg new file mode 100644 index 000000000..caf994530 Binary files /dev/null and b/docs/frontend/images/WithData.jpg differ diff --git a/docs/frontend/images/WithFiles.jpg b/docs/frontend/images/WithFiles.jpg deleted file mode 100644 index a789c03bb..000000000 Binary files a/docs/frontend/images/WithFiles.jpg and /dev/null differ diff --git a/experiments/nova_models_trial.ipynb b/experiments/nova_models_trial.ipynb new file mode 100644 index 000000000..6ed2405ea --- /dev/null +++ b/experiments/nova_models_trial.ipynb @@ -0,0 +1,116 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Error invoking the LLM: 1 validation error for ChatBedrockConverse\n", + " Value error, Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid. Bedrock error:\n", + "\n", + "You must specify a region. [type=value_error, input_value={'model': 'amazon.nova-li...sable_streaming': False}, input_type=dict]\n", + " For further information visit https://errors.pydantic.dev/2.9/v/value_error\n" + ] + }, + { + "ename": "ValidationError", + "evalue": "1 validation error for ChatBedrockConverse\n Value error, Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid. Bedrock error:\n\nYou must specify a region. 
[type=value_error, input_value={'model': 'amazon.nova-li...sable_streaming': False}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.9/v/value_error", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[2], line 47\u001b[0m\n\u001b[1;32m 45\u001b[0m query \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCreate list of 3 popular movies\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 46\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m---> 47\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43mllm\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[43mquery\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 48\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mLLM Response:\u001b[39m\u001b[38;5;124m\"\u001b[39m, response)\n\u001b[1;32m 49\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n", + "File \u001b[0;32m/opt/conda/envs/myenv/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:286\u001b[0m, in \u001b[0;36mBaseChatModel.invoke\u001b[0;34m(self, input, config, stop, **kwargs)\u001b[0m\n\u001b[1;32m 275\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21minvoke\u001b[39m(\n\u001b[1;32m 276\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 277\u001b[0m \u001b[38;5;28minput\u001b[39m: LanguageModelInput,\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 282\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m BaseMessage:\n\u001b[1;32m 283\u001b[0m config \u001b[38;5;241m=\u001b[39m ensure_config(config)\n\u001b[1;32m 284\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m cast(\n\u001b[1;32m 285\u001b[0m ChatGeneration,\n\u001b[0;32m--> 286\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate_prompt\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 287\u001b[0m \u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_convert_input\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 288\u001b[0m \u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 289\u001b[0m \u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcallbacks\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 290\u001b[0m \u001b[43m \u001b[49m\u001b[43mtags\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtags\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 291\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmetadata\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 292\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_name\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 293\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpop\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrun_id\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 294\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 295\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39mgenerations[\u001b[38;5;241m0\u001b[39m][\u001b[38;5;241m0\u001b[39m],\n\u001b[1;32m 296\u001b[0m )\u001b[38;5;241m.\u001b[39mmessage\n", + "File \u001b[0;32m/opt/conda/envs/myenv/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:786\u001b[0m, in \u001b[0;36mBaseChatModel.generate_prompt\u001b[0;34m(self, prompts, stop, callbacks, **kwargs)\u001b[0m\n\u001b[1;32m 778\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mgenerate_prompt\u001b[39m(\n\u001b[1;32m 779\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 780\u001b[0m prompts: \u001b[38;5;28mlist\u001b[39m[PromptValue],\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 783\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 784\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m LLMResult:\n\u001b[1;32m 785\u001b[0m prompt_messages \u001b[38;5;241m=\u001b[39m [p\u001b[38;5;241m.\u001b[39mto_messages() \u001b[38;5;28;01mfor\u001b[39;00m p \u001b[38;5;129;01min\u001b[39;00m prompts]\n\u001b[0;32m--> 786\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate\u001b[49m\u001b[43m(\u001b[49m\u001b[43mprompt_messages\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcallbacks\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/conda/envs/myenv/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:643\u001b[0m, in \u001b[0;36mBaseChatModel.generate\u001b[0;34m(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 641\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_managers:\n\u001b[1;32m 642\u001b[0m run_managers[i]\u001b[38;5;241m.\u001b[39mon_llm_error(e, 
response\u001b[38;5;241m=\u001b[39mLLMResult(generations\u001b[38;5;241m=\u001b[39m[]))\n\u001b[0;32m--> 643\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 644\u001b[0m flattened_outputs \u001b[38;5;241m=\u001b[39m [\n\u001b[1;32m 645\u001b[0m LLMResult(generations\u001b[38;5;241m=\u001b[39m[res\u001b[38;5;241m.\u001b[39mgenerations], llm_output\u001b[38;5;241m=\u001b[39mres\u001b[38;5;241m.\u001b[39mllm_output) \u001b[38;5;66;03m# type: ignore[list-item]\u001b[39;00m\n\u001b[1;32m 646\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m res \u001b[38;5;129;01min\u001b[39;00m results\n\u001b[1;32m 647\u001b[0m ]\n\u001b[1;32m 648\u001b[0m llm_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_combine_llm_outputs([res\u001b[38;5;241m.\u001b[39mllm_output \u001b[38;5;28;01mfor\u001b[39;00m res \u001b[38;5;129;01min\u001b[39;00m results])\n", + "File \u001b[0;32m/opt/conda/envs/myenv/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:633\u001b[0m, in \u001b[0;36mBaseChatModel.generate\u001b[0;34m(self, messages, stop, callbacks, tags, metadata, run_name, run_id, **kwargs)\u001b[0m\n\u001b[1;32m 630\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(messages):\n\u001b[1;32m 631\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 632\u001b[0m results\u001b[38;5;241m.\u001b[39mappend(\n\u001b[0;32m--> 633\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_generate_with_cache\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 634\u001b[0m \u001b[43m \u001b[49m\u001b[43mm\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 635\u001b[0m \u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 636\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_managers\u001b[49m\u001b[43m[\u001b[49m\u001b[43mi\u001b[49m\u001b[43m]\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mrun_managers\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m 637\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 638\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 639\u001b[0m )\n\u001b[1;32m 640\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 641\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_managers:\n", + "File \u001b[0;32m/opt/conda/envs/myenv/lib/python3.10/site-packages/langchain_core/language_models/chat_models.py:851\u001b[0m, in \u001b[0;36mBaseChatModel._generate_with_cache\u001b[0;34m(self, messages, stop, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 849\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 850\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m inspect\u001b[38;5;241m.\u001b[39msignature(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_generate)\u001b[38;5;241m.\u001b[39mparameters\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m--> 851\u001b[0m result \u001b[38;5;241m=\u001b[39m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_generate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 852\u001b[0m \u001b[43m \u001b[49m\u001b[43mmessages\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mstop\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstop\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 853\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 854\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 855\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_generate(messages, stop\u001b[38;5;241m=\u001b[39mstop, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n", + "File \u001b[0;32m/opt/conda/envs/myenv/lib/python3.10/site-packages/langchain_aws/chat_models/bedrock.py:524\u001b[0m, in \u001b[0;36mChatBedrock._generate\u001b[0;34m(self, messages, stop, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 516\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_generate\u001b[39m(\n\u001b[1;32m 517\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 518\u001b[0m messages: List[BaseMessage],\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 521\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 522\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m ChatResult:\n\u001b[1;32m 523\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbeta_use_converse_api:\n\u001b[0;32m--> 524\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_as_converse\u001b[49m\u001b[38;5;241m.\u001b[39m_generate(\n\u001b[1;32m 525\u001b[0m messages, stop\u001b[38;5;241m=\u001b[39mstop, run_manager\u001b[38;5;241m=\u001b[39mrun_manager, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs\n\u001b[1;32m 526\u001b[0m )\n\u001b[1;32m 527\u001b[0m completion \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 528\u001b[0m llm_output: Dict[\u001b[38;5;28mstr\u001b[39m, Any] \u001b[38;5;241m=\u001b[39m {}\n", + "File \u001b[0;32m/opt/conda/envs/myenv/lib/python3.10/site-packages/langchain_aws/chat_models/bedrock.py:853\u001b[0m, in \u001b[0;36mChatBedrock._as_converse\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 851\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtemperature \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 852\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtemperature\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtemperature\n\u001b[0;32m--> 853\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mChatBedrockConverse\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 854\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 855\u001b[0m \u001b[43m 
\u001b[49m\u001b[43mregion_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mregion_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 856\u001b[0m \u001b[43m \u001b[49m\u001b[43mcredentials_profile_name\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcredentials_profile_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 857\u001b[0m \u001b[43m \u001b[49m\u001b[43maws_access_key_id\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43maws_access_key_id\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 858\u001b[0m \u001b[43m \u001b[49m\u001b[43maws_secret_access_key\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43maws_secret_access_key\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 859\u001b[0m \u001b[43m \u001b[49m\u001b[43maws_session_token\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43maws_session_token\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 860\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 861\u001b[0m \u001b[43m \u001b[49m\u001b[43mprovider\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mprovider\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 862\u001b[0m \u001b[43m \u001b[49m\u001b[43mbase_url\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mendpoint_url\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 863\u001b[0m \u001b[43m \u001b[49m\u001b[43mguardrail_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mguardrails\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mif\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_guardrails_enabled\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01melse\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[call-arg]\u001b[39;49;00m\n\u001b[1;32m 864\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 865\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/conda/envs/myenv/lib/python3.10/site-packages/langchain_core/load/serializable.py:125\u001b[0m, in \u001b[0;36mSerializable.__init__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs: Any, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 124\u001b[0m \u001b[38;5;250m 
\u001b[39m\u001b[38;5;124;03m\"\"\"\"\"\"\u001b[39;00m\n\u001b[0;32m--> 125\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m/opt/conda/envs/myenv/lib/python3.10/site-packages/pydantic/main.py:212\u001b[0m, in \u001b[0;36mBaseModel.__init__\u001b[0;34m(self, **data)\u001b[0m\n\u001b[1;32m 210\u001b[0m \u001b[38;5;66;03m# `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks\u001b[39;00m\n\u001b[1;32m 211\u001b[0m __tracebackhide__ \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[0;32m--> 212\u001b[0m validated_self \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__pydantic_validator__\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mvalidate_python\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdata\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mself_instance\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 213\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m validated_self:\n\u001b[1;32m 214\u001b[0m warnings\u001b[38;5;241m.\u001b[39mwarn(\n\u001b[1;32m 215\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mA custom validator is returning a value other than `self`.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 216\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mReturning anything other than `self` from a top level model validator isn\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt supported when validating via `__init__`.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 217\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mSee the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.\u001b[39m\u001b[38;5;124m'\u001b[39m,\n\u001b[1;32m 218\u001b[0m category\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 219\u001b[0m )\n", + "\u001b[0;31mValidationError\u001b[0m: 1 validation error for ChatBedrockConverse\n Value error, Could not load credentials to authenticate with AWS client. Please check that credentials in the specified profile name are valid. Bedrock error:\n\nYou must specify a region. [type=value_error, input_value={'model': 'amazon.nova-li...sable_streaming': False}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.9/v/value_error" + ] + } + ], + "source": [ + "#Import Necessary Modules\n", + "import boto3\n", + "from langchain_aws import ChatBedrock\n", + "import os\n", + "from dotenv import load_dotenv\n", + "load_dotenv()\n", + "\n", + "try:\n", + " aws_access_key = os.getenv(\"AWS_ACCESS_KEY_ID\")\n", + " aws_secret_key = os.getenv(\"AWS_SECRET_ACCESS_KEY\")\n", + " region_name = \"us-east-1\"\n", + " model_name = \"amazon.nova-lite-v1:0\"\n", + "\n", + " if not aws_access_key or not aws_secret_key:\n", + " raise ValueError(\"AWS credentials are missing. 
Ensure AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are set in the .env file.\")\n", + "\n", + "except Exception as e:\n", + " print(f\"Error loading environment variables: {e}\")\n", + " raise\n", + "\n", + "#Initialize AWS Bedrock Client\n", + "try:\n", + " bedrock_client = boto3.client(\n", + " service_name=\"bedrock-runtime\",\n", + " region_name=region_name,\n", + " aws_access_key_id=aws_access_key,\n", + " aws_secret_access_key=aws_secret_key\n", + " )\n", + "except Exception as e:\n", + " print(f\"Error initializing Bedrock client: {e}\")\n", + " raise\n", + "\n", + "#Set Up LangChain ChatBedrock LLM\n", + "try:\n", + " llm = ChatBedrock(\n", + " client=bedrock_client,\n", + " model_id=model_name,\n", + " model_kwargs=dict(temperature=0)\n", + " )\n", + "except Exception as e:\n", + " print(f\"Error setting up ChatBedrock LLM: {e}\")\n", + " raise\n", + "\n", + "\n", + "query = \"Create list of 3 popular movies\"\n", + "try:\n", + " response = llm.invoke(query)\n", + " print(\"LLM Response:\", response)\n", + "except Exception as e:\n", + " print(f\"Error invoking the LLM: {e}\")\n", + " raise\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "myenv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.16" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/frontend/src/HOC/WithVisibility.tsx b/frontend/src/HOC/WithVisibility.tsx new file mode 100644 index 000000000..8b3dd53af --- /dev/null +++ b/frontend/src/HOC/WithVisibility.tsx @@ -0,0 +1,13 @@ +import { VisibilityProps } from "../types"; + +export function withVisibility

<P>(WrappedComponent: React.ComponentType<P>
) { + const VisibityControlled = (props: P & VisibilityProps) => { + if (props.isVisible === false) { + return null; + } + + return ; + }; + + return VisibityControlled; +} diff --git a/frontend/src/components/Content.tsx b/frontend/src/components/Content.tsx index 84e5e52fc..8967c7df9 100644 --- a/frontend/src/components/Content.tsx +++ b/frontend/src/components/Content.tsx @@ -44,14 +44,13 @@ import PostProcessingToast from './Popups/GraphEnhancementDialog/PostProcessingC import { getChunkText } from '../services/getChunkText'; import ChunkPopUp from './Popups/ChunkPopUp'; import { isExpired, isFileReadyToProcess } from '../utils/Utils'; +import { useHasSelections } from '../hooks/useHasSelections'; const ConfirmationDialog = lazy(() => import('./Popups/LargeFilePopUp/ConfirmationDialog')); let afterFirstRender = false; const Content: React.FC = ({ - isSchema, - setIsSchema, showEnhancementDialog, toggleEnhancementDialog, setOpenConnection, @@ -99,12 +98,15 @@ const Content: React.FC = ({ setProcessedCount, setchatModes, model, + additionalInstructions, + setAdditionalInstructions, } = useFileContext(); const [viewPoint, setViewPoint] = useState<'tableView' | 'showGraphView' | 'chatInfoView' | 'neighborView'>( 'tableView' ); const [showDeletePopUp, setshowDeletePopUp] = useState(false); const [deleteLoading, setdeleteLoading] = useState(false); + const hasSelections = useHasSelections(selectedNodes, selectedRels); const { updateStatusForLargeFiles } = useServerSideEvent( (inMinutes, time, fileName) => { @@ -135,11 +137,21 @@ const Content: React.FC = ({ } if (processedCount === 1 && queue.isEmpty()) { (async () => { - showNormalToast(); + showNormalToast( + + ); try { const payload = isGdsActive - ? postProcessingTasks - : postProcessingTasks.filter((task) => task !== 'enable_communities'); + ? hasSelections + ? postProcessingTasks.filter((task) => task !== 'graph_schema_consolidation') + : postProcessingTasks + : hasSelections + ? 
postProcessingTasks.filter((task) => task !== 'graph_schema_consolidation' && task !== 'enable_communities') + : postProcessingTasks.filter((task) => task !== 'enable_communities'); const response = await postProcessing(userCredentials as UserCredentials, payload); if (response.data.status === 'Success') { const communityfiles = response.data?.data; @@ -185,13 +197,6 @@ const Content: React.FC = ({ afterFirstRender = true; }, [queue.items.length, userCredentials]); - useEffect(() => { - const storedSchema = localStorage.getItem('isSchema'); - if (storedSchema !== null) { - setIsSchema(JSON.parse(storedSchema)); - } - }, [isSchema]); - const handleDropdownChange = (selectedOption: OptionType | null | void) => { if (selectedOption?.value) { setModel(selectedOption?.value); @@ -284,7 +289,8 @@ const Content: React.FC = ({ selectedRels.map((t) => t.value), fileItem.googleProjectId, fileItem.language, - fileItem.accessToken + fileItem.accessToken, + additionalInstructions ); if (apiResponse?.status === 'Failed') { let errorobj = { error: apiResponse.error, message: apiResponse.message, fileName: apiResponse.file_name }; @@ -374,7 +380,9 @@ const Content: React.FC = ({ const addFilesToQueue = async (remainingFiles: CustomFile[]) => { if (!remainingFiles.length) { - showNormalToast(); + showNormalToast( + + ); try { const response = await postProcessing(userCredentials as UserCredentials, postProcessingTasks); if (response.data.status === 'Success') { @@ -524,9 +532,8 @@ const Content: React.FC = ({ const handleOpenGraphClick = () => { const bloomUrl = process.env.VITE_BLOOM_URL; const uriCoded = userCredentials?.uri.replace(/:\d+$/, ''); - const connectURL = `${uriCoded?.split('//')[0]}//${userCredentials?.userName}@${uriCoded?.split('//')[1]}:${ - userCredentials?.port ?? '7687' - }`; + const connectURL = `${uriCoded?.split('//')[0]}//${userCredentials?.userName}@${uriCoded?.split('//')[1]}:${userCredentials?.port ?? '7687' + }`; const encodedURL = encodeURIComponent(connectURL); const replacedUrl = bloomUrl?.replace('{CONNECT_URL}', encodedURL); window.open(replacedUrl, '_blank'); @@ -547,6 +554,8 @@ const Content: React.FC = ({ setUserCredentials({ uri: '', password: '', userName: '', database: '' }); setSelectedNodes([]); setSelectedRels([]); + localStorage.removeItem('instructions'); + setAdditionalInstructions(''); setMessages([ { datetime: `${date.toLocaleDateString()} ${date.toLocaleTimeString()}`, @@ -584,12 +593,12 @@ const Content: React.FC = ({ return prev.map((f) => { return f.name === filename ? { - ...f, - status: 'Ready to Reprocess', - processingProgress: isStartFromBegining ? 0 : f.processingProgress, - nodesCount: isStartFromBegining ? 0 : f.nodesCount, - relationshipsCount: isStartFromBegining ? 0 : f.relationshipsCount, - } + ...f, + status: 'Ready to Reprocess', + processingProgress: isStartFromBegining ? 0 : f.processingProgress, + nodesCount: isStartFromBegining ? 0 : f.nodesCount, + relationshipsCount: isStartFromBegining ? 0 : f.relationshipsCount, + } : f; }); }); @@ -697,20 +706,22 @@ const Content: React.FC = ({ const selectedRows = childRef.current?.getSelectedRows(); if (selectedRows?.length) { const expiredFilesExists = selectedRows.some( - (c) => isFileReadyToProcess(c, true) && isExpired(c?.createdAt as Date) + (c) => c.status !== 'Ready to Reprocess' && isExpired(c?.createdAt as Date ?? 
new Date()) ); const largeFileExists = selectedRows.some( (c) => isFileReadyToProcess(c, true) && typeof c.size === 'number' && c.size > largeFileSize ); if (expiredFilesExists) { - setshowConfirmationModal(true); - } else if (largeFileExists && isGCSActive) { setshowExpirationModal(true); + } else if (largeFileExists && isGCSActive) { + setshowConfirmationModal(true); } else { handleGenerateGraph(selectedRows.filter((f) => isFileReadyToProcess(f, false))); } } else if (filesData.length) { - const expiredFileExists = filesData.some((c) => isFileReadyToProcess(c, true) && isExpired(c.createdAt as Date)); + const expiredFileExists = filesData.some( + (c) => isExpired(c?.createdAt as Date) + ); const largeFileExists = filesData.some( (c) => isFileReadyToProcess(c, true) && typeof c.size === 'number' && c.size > largeFileSize ); @@ -727,7 +738,7 @@ const Content: React.FC = ({ } else if (expiredFileExists && isGCSActive) { setshowExpirationModal(true); } else { - handleGenerateGraph(filesData.filter((f) => isFileReadyToProcess(f, false))); + handleGenerateGraph(filesData.filter((f) => f.status === 'New' || f.status === 'Ready to Reprocess')); } } }; @@ -771,6 +782,7 @@ const Content: React.FC = ({ onClose={() => setshowConfirmationModal(false)} loading={extractLoading} selectedRows={childRef.current?.getSelectedRows() as CustomFile[]} + isLargeDocumentAlert={true} > )} @@ -837,19 +849,17 @@ const Content: React.FC = ({ />

- {!isSchema ? ( + {!hasSelections ? ( - ) : selectedNodes.length || selectedRels.length ? ( - - ) : ( - - )} + ) : + ( + )}
- {isSchema ? ( + {hasSelections? ( - {(!selectedNodes.length || !selectedNodes.length) && 'Empty'} Graph Schema configured - {selectedNodes.length || selectedRels.length + {(hasSelections)} Graph Schema configured + {hasSelections ? `(${selectedNodes.length} Labels + ${selectedRels.length} Rel Types)` : ''} diff --git a/frontend/src/components/Layout/DrawerDropzone.tsx b/frontend/src/components/Layout/DrawerDropzone.tsx index e937e7438..7396ff786 100644 --- a/frontend/src/components/Layout/DrawerDropzone.tsx +++ b/frontend/src/components/Layout/DrawerDropzone.tsx @@ -1,19 +1,18 @@ +import React, { useMemo, Suspense, lazy } from 'react'; import { Drawer, Flex, StatusIndicator, Typography, useMediaQuery } from '@neo4j-ndl/react'; import DropZone from '../DataSources/Local/DropZone'; -import React, { useMemo, Suspense, lazy } from 'react'; import S3Component from '../DataSources/AWS/S3Bucket'; -import { DrawerProps } from '../../types'; import GCSButton from '../DataSources/GCS/GCSButton'; import CustomAlert from '../UI/Alert'; +import FallBackDialog from '../UI/FallBackDialog'; import { useAlertContext } from '../../context/Alert'; +import { useCredentials } from '../../context/UserCredentials'; import { APP_SOURCES } from '../../utils/Constants'; import GenericButton from '../WebSources/GenericSourceButton'; import GenericModal from '../WebSources/GenericSourceModal'; -import FallBackDialog from '../UI/FallBackDialog'; -import { useCredentials } from '../../context/UserCredentials'; +import { DrawerProps } from '../../types'; const S3Modal = lazy(() => import('../DataSources/AWS/S3Modal')); const GCSModal = lazy(() => import('../DataSources/GCS/GCSModal')); - const DrawerDropzone: React.FC = ({ isExpanded, toggleS3Modal, @@ -24,189 +23,102 @@ const DrawerDropzone: React.FC = ({ showGenericModal, }) => { const { closeAlert, alertState } = useAlertContext(); - const { isReadOnlyUser, isBackendConnected } = useCredentials(); - const largedesktops = useMediaQuery(`(min-width:1440px )`); - - const isYoutubeOnlyCheck = useMemo( - () => APP_SOURCES?.includes('youtube') && !APP_SOURCES.includes('wiki') && !APP_SOURCES.includes('web'), - [APP_SOURCES] + const { isReadOnlyUser, isBackendConnected, connectionStatus } = useCredentials(); + const isLargeDesktop = useMediaQuery('(min-width:1440px)'); + const isYoutubeOnly = useMemo( + () => APP_SOURCES.includes('youtube') && !APP_SOURCES.includes('wiki') && !APP_SOURCES.includes('web'), + [] ); - const isWikipediaOnlyCheck = useMemo( - () => APP_SOURCES?.includes('wiki') && !APP_SOURCES.includes('youtube') && !APP_SOURCES.includes('web'), - [APP_SOURCES] + const isWikipediaOnly = useMemo( + () => APP_SOURCES.includes('wiki') && !APP_SOURCES.includes('youtube') && !APP_SOURCES.includes('web'), + [] ); - const iswebOnlyCheck = useMemo( - () => APP_SOURCES?.includes('web') && !APP_SOURCES.includes('youtube') && !APP_SOURCES.includes('wiki'), - [APP_SOURCES] + const isWebOnly = useMemo( + () => APP_SOURCES.includes('web') && !APP_SOURCES.includes('youtube') && !APP_SOURCES.includes('wiki'), + [] ); - + if (!isLargeDesktop) { + return null; + } return ( -
+
- {!isReadOnlyUser ? ( - - {alertState.showAlert && ( - - )} -
-
-
-
- {process.env.VITE_ENV != 'PROD' && ( - - - {!isBackendConnected ? : } - + {connectionStatus ? ( + !isReadOnlyUser ? ( + + {alertState.showAlert && ( + + )} +
+
+ {process.env.VITE_ENV !== 'PROD' && ( +
+ + Backend connection status - )} -
- {process.env.VITE_ENV != 'PROD' ? ( - <> - - {APP_SOURCES != undefined && APP_SOURCES.includes('local') && ( -
- -
- )} - {(APP_SOURCES != undefined && APP_SOURCES.includes('s3')) || - (APP_SOURCES != undefined && APP_SOURCES.includes('gcs')) ? ( - <> - {(APP_SOURCES.includes('youtube') || - APP_SOURCES.includes('wiki') || - APP_SOURCES.includes('web')) && ( -
- - -
- )} - {APP_SOURCES.includes('s3') && ( -
- - }> - - -
- )} - {APP_SOURCES.includes('gcs') && ( -
- - }> - - -
- )} - - ) : ( - <> - )} -
- - ) : ( - <> - - {APP_SOURCES != undefined && APP_SOURCES.includes('local') && ( -
- -
- )} - {((APP_SOURCES != undefined && APP_SOURCES.includes('youtube')) || - (APP_SOURCES != undefined && APP_SOURCES.includes('wiki')) || - (APP_SOURCES != undefined && APP_SOURCES.includes('web'))) && ( -
- - -
- )} - {(APP_SOURCES != undefined && APP_SOURCES.includes('s3')) || - (APP_SOURCES != undefined && APP_SOURCES.includes('gcs')) ? ( - <> - {APP_SOURCES != undefined && APP_SOURCES.includes('s3') && ( -
- - }> - - -
- )} - {APP_SOURCES != undefined && APP_SOURCES.includes('gcs') && ( -
- - -
- )} - - ) : ( - <> - )} -
- +
)} + + {APP_SOURCES.includes('local') && ( +
+ +
+ )} + {APP_SOURCES.some((source) => ['youtube', 'wiki', 'web'].includes(source)) && ( +
+ + +
+ )} + {APP_SOURCES.includes('s3') && ( +
+ + }> + + +
+ )} + {APP_SOURCES.includes('gcs') && ( +
+ + }> + + +
+ )} +
-
- + + ) : ( + + + This user account does not have permission to access or manage data sources. + + + ) ) : ( - + - This user account does not have permission to access or manage data sources. + You are not logged in. Please Login to access the content. )} @@ -214,5 +126,4 @@ const DrawerDropzone: React.FC = ({
); }; - export default DrawerDropzone; diff --git a/frontend/src/components/Layout/Header.tsx b/frontend/src/components/Layout/Header.tsx index 2cd243b29..f722440c9 100644 --- a/frontend/src/components/Layout/Header.tsx +++ b/frontend/src/components/Layout/Header.tsx @@ -11,10 +11,9 @@ import { ArrowDownTrayIconOutline, } from '@neo4j-ndl/react/icons'; import { Button, TextLink, Typography } from '@neo4j-ndl/react'; -import { Dispatch, memo, SetStateAction, useCallback, useContext, useEffect, useRef, useState } from 'react'; +import { Dispatch, memo, SetStateAction, useCallback, useContext, useRef, useState } from 'react'; import { IconButtonWithToolTip } from '../UI/IconButtonToolTip'; import { buttonCaptions, tooltips } from '../../utils/Constants'; -import { useFileContext } from '../../context/UsersFiles'; import { ThemeWrapperContext } from '../../context/ThemeWrapper'; import { useCredentials } from '../../context/UserCredentials'; import { useNavigate } from 'react-router'; @@ -39,13 +38,9 @@ const Header: React.FC = ({ chatOnly, deleteOnClick, setOpenConnecti window.open(url, '_blank'); }, []); const downloadLinkRef = useRef(null); - const { isSchema, setIsSchema } = useFileContext(); const { connectionStatus } = useCredentials(); const chatAnchor = useRef(null); const [showChatModeOption, setshowChatModeOption] = useState(false); - useEffect(() => { - setIsSchema(isSchema); - }, [isSchema]); const openChatPopout = useCallback(() => { let session = localStorage.getItem('neo4j.connection'); diff --git a/frontend/src/components/Layout/PageLayout.tsx b/frontend/src/components/Layout/PageLayout.tsx index 1b6b478eb..69dcf155b 100644 --- a/frontend/src/components/Layout/PageLayout.tsx +++ b/frontend/src/components/Layout/PageLayout.tsx @@ -52,7 +52,7 @@ const PageLayout: React.FC = () => { }; const { messages, setClearHistoryData, clearHistoryData, setMessages, setIsDeleteChatLoading } = useMessageContext(); - const { isSchema, setIsSchema, setShowTextFromSchemaDialog, showTextFromSchemaDialog } = useFileContext(); + const { setShowTextFromSchemaDialog, showTextFromSchemaDialog } = useFileContext(); const { setConnectionStatus, setGdsActive, @@ -289,8 +289,6 @@ const PageLayout: React.FC = () => { openTextSchema={() => { setShowTextFromSchemaDialog({ triggeredFrom: 'schemadialog', show: true }); }} - isSchema={isSchema} - setIsSchema={setIsSchema} showEnhancementDialog={showEnhancementDialog} toggleEnhancementDialog={toggleEnhancementDialog} setOpenConnection={setOpenConnection} @@ -349,8 +347,6 @@ const PageLayout: React.FC = () => { openTextSchema={() => { setShowTextFromSchemaDialog({ triggeredFrom: 'schemadialog', show: true }); }} - isSchema={isSchema} - setIsSchema={setIsSchema} showEnhancementDialog={showEnhancementDialog} toggleEnhancementDialog={toggleEnhancementDialog} setOpenConnection={setOpenConnection} diff --git a/frontend/src/components/Popups/ExpirationModal/ExpiredFilesAlert.tsx b/frontend/src/components/Popups/ExpirationModal/ExpiredFilesAlert.tsx index 23738820c..2207c866a 100644 --- a/frontend/src/components/Popups/ExpirationModal/ExpiredFilesAlert.tsx +++ b/frontend/src/components/Popups/ExpirationModal/ExpiredFilesAlert.tsx @@ -7,6 +7,7 @@ import BellImage from '../../../assets/images/Stopwatch-blue.svg'; import AlertIcon from '../../Layout/AlertIcon'; import { isExpired } from '../../../utils/Utils'; import { EXPIRATION_DAYS } from '../../../utils/Constants'; +import { IconWithToolTip } from '../../UI/IconButtonToolTip'; const ExpiredFilesAlert: FC = ({ Files, 
handleToggle, checked }) => { return ( @@ -31,14 +32,8 @@ const ExpiredFilesAlert: FC = ({ Files, handleToggle, checked } { - if (e.target.checked) { - handleToggle(true, f.id); - } else { - handleToggle(false, f.id); - } - }} - isChecked={checked.indexOf(f.id) !== -1} + isChecked={checked.includes(f.id)} + onChange={(e) => handleToggle(e.target.checked, f.id)} htmlAttributes={{ tabIndex: -1 }} /> @@ -53,7 +48,9 @@ const ExpiredFilesAlert: FC = ({ Files, handleToggle, checked } {f.createdAt != undefined && isExpired(f.createdAt) ? ( - + + + ) : ( <> diff --git a/frontend/src/components/Popups/GraphEnhancementDialog/AdditionalInstructions/index.tsx b/frontend/src/components/Popups/GraphEnhancementDialog/AdditionalInstructions/index.tsx new file mode 100644 index 000000000..40be2c99e --- /dev/null +++ b/frontend/src/components/Popups/GraphEnhancementDialog/AdditionalInstructions/index.tsx @@ -0,0 +1,64 @@ +import { Flex, TextArea, Typography, useMediaQuery } from '@neo4j-ndl/react'; +import { buttonCaptions } from '../../../../utils/Constants'; +import { tokens } from '@neo4j-ndl/base'; +import ButtonWithToolTip from '../../../UI/ButtonWithToolTip'; +import { useCallback } from 'react'; +import { useFileContext } from '../../../../context/UsersFiles'; +import { showNormalToast } from '../../../../utils/toasts'; + +export default function AdditionalInstructionsText({ + closeEnhanceGraphSchemaDialog, +}: { + closeEnhanceGraphSchemaDialog: () => void; +}) { + const { breakpoints } = tokens; + const tablet = useMediaQuery(`(min-width:${breakpoints.xs}) and (max-width: ${breakpoints.lg})`); + const { additionalInstructions, setAdditionalInstructions } = useFileContext(); + + const clickAnalyzeIntructHandler = useCallback(async () => { + localStorage.setItem('instructions', additionalInstructions); + closeEnhanceGraphSchemaDialog(); + showNormalToast(`Successfully Applied the Instructions`); + }, [additionalInstructions]); + return ( + +
+ + + + {buttonCaptions.provideAdditionalInstructions} + + + +