# How to Use Guide
#   ollama pull llama3.1
#   ./go-crew
name = 'default.toml' # name of this configuration file
prompt = 'Hello World' # initial prompt
default_model = 'llama3.1' # Ollama model to use by default
default_temperature = 0.8
default_model_context = 0 # 0 = automatic
default_timeout = 60 # presumably seconds — TODO confirm
concurrency = 2 # number of parallel requests, for more speed
language = '' # set to force a response language (empty = model default)
system_prompt = '' # optional system prompt (empty = none)
server_url = '' # for custom Ollama servers (empty = default)
verbose = false
[hosting]
enabled = false # host a webpage
port = 3000 # port for the hosted webpage
auto_open = false # open the page automatically on start
[embedding]
enabled = false # use RAG functions
database = 'default.db' # embedding database file
database_compressed = false
database_encryption_key = '' # empty = no encryption — TODO confirm
model = 'nomic-embed-text' # model for embedding
contexts_rerank = true # rerank retrieved contexts — presumably before injection; confirm
contexts_per_request = 3 # number of contexts attached per request
[embedding.chunking]
semantic_chunking = true # use semantic chunking
semantic_threshold = 0.456 # similarity threshold for semantic splits — TODO confirm range
chunk_max_size = 1024 # presumably characters or tokens — confirm
chunk_overlap = 154 # ~0.15 * chunk_max_size
[[embedding.sources]]
document = 'document.txt' # read a file; the name may use * as a wildcard
[[embedding.sources]]
document = 'document.csv' # read a CSV file
header_question = 'question' # optional: column header used for the question embedding
header_answer = 'answer' # optional: column header used for the answer response
[[embedding.sources]]
url = 'https://www.jonathanhecl.com/go-crew/' # read a website by URL