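# Pipeline configuration in the node_lines format used by AutoRAG (assumed from the schema).
# Node lines run in order; within each node, every listed module and parameter combination
# is evaluated on the strategy metrics and the best-performing one is kept.
# speed_threshold filters out modules that run slower than the given limit.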
node_lines:
  - node_line_name: retrieve_node_line  # Arbitrary node line name
    nodes:
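      # Retrieval node: compares BM25 (Korean kiwi tokenizer), an OpenAI-embedding
      # vector DB, and two hybrid fusion methods (reciprocal rank fusion and convex
      # combination) on the listed retrieval metrics.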
      - node_type: retrieval
        strategy:
          metrics: [ retrieval_f1, retrieval_recall, retrieval_precision,
                     retrieval_ndcg, retrieval_map, retrieval_mrr ]
          speed_threshold: 10
        top_k: 10
        modules:
          - module_type: bm25
            bm25_tokenizer: [ ko_kiwi ]
          - module_type: vectordb
            embedding_model: openai
            embedding_batch: 256
          - module_type: hybrid_rrf
            weight_range: (4, 80)
          - module_type: hybrid_cc
            normalize_method: [ mm, tmm, z, dbsf ]
            weight_range: (0.0, 1.0)
            test_weight_size: 101
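      # Passage augmenter: pass_passage_augmenter leaves results unchanged (baseline);
      # prev_next_augmenter with mode: next appends each passage's following passage.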
      - node_type: passage_augmenter
        strategy:
          metrics: [ retrieval_f1, retrieval_recall, retrieval_precision ]
          speed_threshold: 5
        top_k: 5
        embedding_model: openai
        modules:
          - module_type: pass_passage_augmenter
          - module_type: prev_next_augmenter
            mode: next
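      # Passage reranker: each candidate reranker reorders the retrieved passages;
      # pass_reranker is the no-op baseline for comparison.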
      - node_type: passage_reranker
        strategy:
          metrics: [ retrieval_f1, retrieval_recall, retrieval_precision ]
          speed_threshold: 10
        top_k: 5
        modules:
          - module_type: pass_reranker
          - module_type: tart
          - module_type: monot5
          - module_type: upr
          - module_type: rankgpt
          - module_type: colbert_reranker
          - module_type: sentence_transformer_reranker
          - module_type: flag_embedding_reranker
          - module_type: flag_embedding_llm_reranker
          - module_type: openvino_reranker
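      # Passage filter: drops low-relevance passages, either by an absolute score/similarity
      # threshold or by keeping only the top percentile; pass_passage_filter keeps everything.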
      - node_type: passage_filter
        strategy:
          metrics: [ retrieval_f1, retrieval_recall, retrieval_precision ]
          speed_threshold: 5
        modules:
          - module_type: pass_passage_filter
          - module_type: similarity_threshold_cutoff
            threshold: 0.85
          - module_type: similarity_percentile_cutoff
            percentile: 0.6
          - module_type: threshold_cutoff
            threshold: 0.85
          - module_type: percentile_cutoff
            percentile: 0.6
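      # Passage compressor: tree_summarize and refine condense the retrieved passages
      # with an LLM before prompting; longllmlingua applies LongLLMLingua prompt
      # compression; pass_compressor skips compression entirely.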
      - node_type: passage_compressor
        strategy:
          metrics: [ retrieval_token_f1, retrieval_token_recall, retrieval_token_precision ]
          speed_threshold: 10
        modules:
          - module_type: pass_compressor
          - module_type: tree_summarize
            llm: openai
            model: gpt-4o-mini
            prompt: |
              ์ฌ๋ฌ ๋ฌธ๋งฅ ์ ๋ณด๋ ๋ค์๊ณผ ๊ฐ์ต๋๋ค.\n
              ---------------------\n
              {context_str}\n
              ---------------------\n
              ์ฌ์ ์ง์์ด ์๋ ์ฌ๋ฌ ์ ๋ณด๊ฐ ์ฃผ์ด์ก์ต๋๋ค,
              ์ง๋ฌธ์ ๋๋ตํ์ธ์.\n
              ์ง๋ฌธ: {query_str}\n
              ๋ต๋ณ:
          - module_type: refine
            llm: openai
            model: gpt-4o-mini
            prompt: |
              ์๋ ์ง๋ฌธ์ ๋ค์๊ณผ ๊ฐ์ต๋๋ค: {query_str}
              ๊ธฐ์กด ๋ต๋ณ์ ๋ค์๊ณผ ๊ฐ์ต๋๋ค: {existing_answer}
              ์๋์์ ๊ธฐ์กด ๋ต๋ณ์ ์ ์ ํ ์ ์๋ ๊ธฐํ๊ฐ ์์ต๋๋ค.
              (ํ์ํ ๊ฒฝ์ฐ์๋ง) ์๋์ ๋ช ๊ฐ์ง ๋งฅ๋ฝ์ ์ถ๊ฐํ์ฌ ๊ธฐ์กด ๋ต๋ณ์ ์ ์ ํ ์ ์์ต๋๋ค.
              ------------
              {context_msg}
              ------------
              ์๋ก์ด ๋ฌธ๋งฅ์ด ์ฃผ์ด์ง๋ฉด ๊ธฐ์กด ๋ต๋ณ์ ์์ ํ์ฌ ์ง๋ฌธ์ ๋ํ ๋ต๋ณ์ ์ ์ ํฉ๋๋ค.
              ๋งฅ๋ฝ์ด ์ธ๋ชจ ์๋ค๋ฉด, ๊ธฐ์กด ๋ต๋ณ์ ๊ทธ๋๋ก ๋ต๋ณํ์ธ์.
              ์ ์ ๋ ๋ต๋ณ:
          - module_type: longllmlingua
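  # Post-retrieval node line: builds the final prompt and generates the answer.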
  - node_line_name: post_retrieve_node_line  # Arbitrary node line name
    nodes:
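      # Prompt maker: fills a prompt template with the query and retrieved contents;
      # candidate prompts are scored by generating answers with generator_modules and
      # computing the generation metrics below.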
      - node_type: prompt_maker
        strategy:
          metrics:
            - metric_name: bleu
            - metric_name: meteor
            - metric_name: rouge
            - metric_name: sem_score
              embedding_model: openai
          speed_threshold: 10
          generator_modules:
            - module_type: llama_index_llm
              llm: openai
              model: [gpt-4o-mini]
        modules:
          - module_type: fstring
            prompt: ["์ฃผ์ด์ง passage๋ง์ ์ด์ฉํ์ฌ question์ ๋ฐ๋ผ ๋ตํ์์ค passage: {retrieved_contents} \n\n Question: {query} \n\n Answer:"]
          - module_type: long_context_reorder
            prompt: ["์ฃผ์ด์ง passage๋ง์ ์ด์ฉํ์ฌ question์ ๋ฐ๋ผ ๋ตํ์์ค passage: {retrieved_contents} \n\n Question: {query} \n\n Answer:"]
      - node_type: generator
        strategy:
          metrics:
            - metric_name: bleu
            - metric_name: meteor
            - metric_name: rouge
            - metric_name: sem_score
              embedding_model: openai
          speed_threshold: 10
        modules:
          - module_type: llama_index_llm
            llm: [openai]
            model: [gpt-4o-mini]
            temperature: [0.5, 1.0]
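# A minimal sketch of running this config, assuming it is used with AutoRAG's standard
# Evaluator entry point; the parquet paths and YAML filename below are placeholders:
#
#   from autorag.evaluator import Evaluator
#
#   evaluator = Evaluator(qa_data_path="qa.parquet", corpus_data_path="corpus.parquet")
#   evaluator.start_trial("full.yaml")  # this configuration file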