Package index
-
add_api_key_header()
- Add an API key header to the Qdrant connection for security
-
add_context()
- Add context to be used by the model when answering
-
add_tools_declaration()
- Add tools declaration for the LLM to use
-
add_vision_capability()
- Add vision capability to the workflow.
-
add_workflow_step()
- Add a step (i.e. another workflow) to an existing workflow
-
ai_workflow()
- Define AI workflow
-
apply_processing_skill()
- Apply a processing skill to the current workflow
-
convert_batch_documents_to_embeddings()
- Convert a batch of documents to embeddings
-
convert_embeddings_to_qdrant_format()
- Convert embeddings to Qdrant format
-
convert_ollama_completion_response_to_tibble()
- Convert an ollama server completion response to a tibble
-
convert_ollama_model_info_response_to_tibble()
- Convert ollama response for model info to a tibble
-
convert_ollama_tags_response_to_tibble()
- Convert an ollama server tags response to a tibble
-
create_custom_processing_skill()
- Create a processing skill template file that can be used to create your own skills
-
display_intermediate_answer()
- Display an intermediate answer
-
execute_workflow()
- Execute an AI workflow
-
execute_workflow_on_df()
- Execute an AI workflow on a dataframe (with or without a pipe)
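Taken together with the set_*() helpers listed further down this index, these verbs suggest a pipe-friendly pattern. The sketch below is illustrative only: the connector and model names and the `prompt` argument are assumptions, not signatures confirmed by this index.

```r
# Hypothetical end-to-end flow: define, configure, and run a workflow.
# Connector/model names and the way the prompt is passed are assumptions.
wf <- ai_workflow() |>
  set_connector("ollama") |>
  set_model("llama3") |>
  add_context("Background text the model should rely on") |>
  set_system_prompt("Answer concisely.")

answer <- wf |>
  execute_workflow(prompt = "Summarise the context in one sentence.")
```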
-
extract_snippets()
- Extract snippets
-
generate_document_embeddings()
- Get embeddings for a piece of context through an ollama server instance
-
generate_numeric_list()
- Generate a numeric list
-
generate_uuid_from_text()
- Generate UUID from text
-
get_list_ollama_models()
- Get a list of models available from the ollama server
-
get_ollama_chat_completion()
- Get chat completion from ollama server
-
get_ollama_completion()
- Get a completion from ollama server
-
get_ollama_connection()
- Define a connection to a local ollama server
-
get_ollama_embeddings()
- Get embeddings for a piece of context through an ollama server instance
-
get_ollama_model_info()
- Get information about one ollama model
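For the lower-level ollama helpers, a minimal sketch, assuming the connection object is passed to the other calls and that `prompt` is the argument name (neither is confirmed by this index):

```r
# Hypothetical sketch: talk to a local ollama server directly.
con <- get_ollama_connection()
test_ollama_connection(con)                      # confirm the server is reachable

# Assumes the models listing is a raw response that the converter can tidy.
models <- get_list_ollama_models(con) |>
  convert_ollama_tags_response_to_tibble()

resp <- get_ollama_completion(con, prompt = "Say hello in one word.")
convert_ollama_completion_response_to_tibble(resp)
```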
-
get_qdrant_connection()
- Get Qdrant connection
-
inspect_processing_skill()
- Inspect a specific processing skill
-
list_global_functions()
- List global functions
-
list_processing_skill_parameters()
- List extra parameters for a given processing skill
-
list_processing_skills()
- List the processing skills
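A minimal sketch of discovering and attaching a skill; the skill name "summarise" is purely illustrative and may not ship with the package:

```r
# Hypothetical sketch: discover built-in processing skills and attach one.
list_processing_skills()
list_processing_skill_parameters("summarise")    # skill name is made up
inspect_processing_skill("summarise")

wf <- ai_workflow() |>
  set_processing_skill("summarise")
```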
-
load_context_embeddings_from_feather_files()
- Load context embeddings from Feather files
-
load_workflow()
- Load workflow
-
make_cosine_similarity_matrix()
- Make a cosine similarity matrix
-
parse_json_result()
- Parse JSON answer from the LLM
-
process_prompts()
- Process prompts starting from a workflow
-
pull_final_answer()
- Pull the final answer
-
qdrant_check_collection_existence()
- Qdrant: Check collection existence
-
qdrant_check_connection_validity()
- Qdrant: Check if the connection is valid
-
qdrant_create_new_collection()
- Qdrant: Create new collection
-
qdrant_delete_collection()
- Qdrant: Delete collection
-
qdrant_delete_points()
- Qdrant: Delete points (vectors)
-
qdrant_get_collection_details()
- Qdrant: Get collection details
-
qdrant_list_all_collections()
- Qdrant: List all collections
-
qdrant_retrieve_point()
- Qdrant: Retrieve a specific point (vector)
-
qdrant_search_points()
- Qdrant: Search points (vectors)
-
qdrant_upsert_points()
- Qdrant: Upsert points
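A hedged sketch of a Qdrant round trip built from the helpers above; the collection name, API key, and the `points`/`query_vector` objects are placeholders, and a live Qdrant instance plus an embedding step would be needed to actually run it:

```r
# Hypothetical sketch: check, create, fill, and query a Qdrant collection.
qcon <- get_qdrant_connection() |>
  add_api_key_header("my-api-key")               # key value is a placeholder

qdrant_check_connection_validity(qcon)

if (!qdrant_check_collection_existence(qcon, "docs")) {
  qdrant_create_new_collection(qcon, "docs")
}

# `points` would typically come from convert_embeddings_to_qdrant_format();
# `query_vector` from an embedding of the query text.
qdrant_upsert_points(qcon, "docs", points)
hits <- qdrant_search_points(qcon, "docs", query_vector)
```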
-
request_json_answer()
- Request JSON answer from the LLM
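A small sketch pairing request_json_answer() with parse_json_result(); how the prompt reaches execute_workflow() is an assumption:

```r
# Hypothetical sketch: request structured output and parse it.
wf <- ai_workflow() |>
  set_model("llama3") |>
  request_json_answer()

raw <- execute_workflow(wf, prompt = "List three colours as a JSON array.")
parse_json_result(raw)
```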
-
retrieve_similar_vectors()
- Retrieve similar vectors
-
save_workflow()
- Save workflow
-
set_audience()
- Define a specific audience you want the model to prepare an answer for
-
set_connector()
- Set the connector required to operate the workflow.
-
set_current_time_and_date_reference()
- Set the current time and date as an additional reference
-
set_custom_processing_skill()
- Set a custom processing skill (that you created) to give to the workflow.
-
set_default_missing_parameters_in_workflow()
- Set defaults for missing workflow parameters
-
set_embedding_model()
- Set the embedding model to be used by the workflow
-
set_frequency_penalty()
- Set the frequency penalty of the model used by the workflow.
-
set_ip_addr()
- Set the IP address required to connect to an API server.
-
set_mode()
- Set the mode of the model used by the workflow.
-
set_model()
- Set the LLM model to be used by the workflow
-
set_n_predict()
- Set the maximum number of tokens to be predicted by the workflow.
-
set_num_ctx()
- Set the length of the context to be handled by the model
-
set_overall_background()
- Set overall background info for your model before an answer is formulated
-
set_port()
- Set the port required to connect to the API server.
-
set_presence_penalty()
- Set the presence penalty of the model used by the workflow.
-
set_processing_skill()
- Set the processing skill that you want to give to the workflow.
-
set_repeat_penalty()
- Set the repeat penalty of the model used by the workflow.
-
set_seed()
- Set the seed of the model used by the workflow.
-
set_style_of_voice()
- Define a specific style of voice that you want the LLM to use when answering
-
set_system_prompt()
- Set the system prompt to be used by the model.
-
set_temperature()
- Set the temperature of the model used by the workflow.
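A minimal sketch of chaining the set_*() tuning helpers above; the numeric values are illustrative rather than recommended defaults:

```r
# Hypothetical sketch: tune generation settings on a workflow.
wf <- ai_workflow() |>
  set_model("llama3") |>
  set_temperature(0.2) |>
  set_seed(42) |>
  set_num_ctx(4096) |>
  set_n_predict(512) |>
  set_repeat_penalty(1.1)
```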
-
split_text_as_paragraphs()
- Split text into paragraphs
-
split_text_as_sentences()
- Split text into sentences
-
switch_to_workflow()
- Switch to workflow
-
test_llamacpp_connection()
- Confirm connection to a Llama.cpp server is working
-
test_ollama_connection()
- Confirm connection to ollama is working
-
write_vectors_to_feather_file()
- Write vectors to a Feather file
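Finally, a hedged sketch of building and reusing a small local embedding store from the splitting, embedding, Feather, and retrieval helpers in this index; argument order, the embedding model name, and the Feather path are assumptions:

```r
# Hypothetical sketch: embed text, persist the vectors, and query them later.
wf <- ai_workflow() |>
  set_connector("ollama") |>
  set_embedding_model("nomic-embed-text")        # model name is illustrative

long_document <- paste(
  "Pricing is reviewed every quarter.",
  "Support is available on weekdays.",
  sep = "\n\n"
)

chunks <- split_text_as_paragraphs(long_document)
emb <- generate_document_embeddings(wf, chunks)

write_vectors_to_feather_file(emb, "context_embeddings.feather")
store <- load_context_embeddings_from_feather_files("context_embeddings.feather")

retrieve_similar_vectors(store, "What does the text say about pricing?")
```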