10 changes: 5 additions & 5 deletions lightspeed-stack.yaml
@@ -10,12 +10,12 @@ service:
 llama_stack:
   # Uses a remote llama-stack service
   # The instance would have already been started with a llama-stack-run.yaml file
-  use_as_library_client: false
+  # use_as_library_client: false
   # Alternative for "as library use"
-  # use_as_library_client: true
-  # library_client_config_path: <path-to-llama-stack-run.yaml-file>
-  url: http://llama-stack:8321
-  api_key: xyzzy
+  use_as_library_client: true
+  library_client_config_path: run.yaml
+  # url: http://llama-stack:8321
+  # api_key: xyzzy
 user_data_collection:
   feedback_enabled: true
   feedback_storage: "/tmp/data/feedback"
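
The net effect: lightspeed-stack stops calling a separately running llama-stack server over HTTP and instead embeds llama-stack in-process, built from the adjacent run.yaml. One caveat: library_client_config_path: run.yaml is a relative path, so it presumably resolves against whatever directory lightspeed-stack is launched from (an assumption; it depends on how lightspeed-stack loads the file). A sketch of the same section with an absolute path, which avoids that dependence (the exact path shown is hypothetical):

  llama_stack:
    use_as_library_client: true
    # Hypothetical absolute path; adjust to the deployment layout. Using an
    # absolute path avoids depending on the working directory at launch.
    library_client_config_path: /opt/app-root/src/run.yaml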
267 changes: 140 additions & 127 deletions run.yaml
@@ -1,157 +1,170 @@
version: 2
image_name: starter
external_providers_dir: ${env.EXTERNAL_PROVIDERS_DIR}

apis:
- agents
- batches
- datasetio
- eval
- files
- inference
- safety
- scoring
- tool_runtime
- vector_io

benchmarks: []
conversations_store:
db_path: ~/.llama/storage/conversations.db
type: sqlite
datasets: []
image_name: starter
# external_providers_dir: /opt/app-root/src/.llama/providers.d
inference_store:
db_path: ~/.llama/storage/inference-store.db
type: sqlite
metadata_store:
db_path: ~/.llama/storage/registry.db
type: sqlite
- batches
- datasetio
- eval
- files
- inference
- safety
- scoring
- tool_runtime
- vector_io

providers:
inference:
- provider_id: openai # This ID is a reference to 'providers.inference'
provider_type: remote::openai
config:
api_key: ${env.OPENAI_API_KEY}
allowed_models: ["${env.E2E_OPENAI_MODEL:=gpt-4o-mini}"]
- config: {}
provider_id: sentence-transformers
provider_type: inline::sentence-transformers
files:
- config:
metadata_store:
table_name: files_metadata
backend: sql_default
storage_dir: ~/.llama/storage/files
provider_id: meta-reference-files
provider_type: inline::localfs
safety:
- config:
excluded_categories: []
provider_id: llama-guard
provider_type: inline::llama-guard
scoring:
- provider_id: basic
provider_type: inline::basic
config: {}
- provider_id: llm-as-judge
provider_type: inline::llm-as-judge
config: {}
- provider_id: braintrust
provider_type: inline::braintrust
config:
openai_api_key: '********'
tool_runtime:
- config: {} # Enable the RAG tool
provider_id: rag-runtime
provider_type: inline::rag-runtime
vector_io:
- config: # Define the storage backend for RAG
persistence:
namespace: vector_io::faiss
backend: kv_default
provider_id: faiss
provider_type: inline::faiss
agents:
- config:
persistence:
agent_state:
namespace: agents_state
backend: kv_default
responses:
table_name: agents_responses
backend: sql_default
provider_id: meta-reference
provider_type: inline::meta-reference
batches:
- config:
kvstore:
namespace: batches_store
backend: kv_default
provider_id: reference
provider_type: inline::reference
datasetio:
- config:
kvstore:
namespace: huggingface_datasetio
backend: kv_default
provider_id: huggingface
provider_type: remote::huggingface
- config:
kvstore:
namespace: localfs_datasetio
backend: kv_default
provider_id: localfs
provider_type: inline::localfs
eval:
- config:
kvstore:
namespace: eval_store
backend: kv_default
provider_id: meta-reference
provider_type: inline::meta-reference
scoring_fns: []
server:
port: 8321
storage:
backends:
kv_default: # Define the storage backend type for RAG, in this case registry and RAG are unified i.e. information on registered resources (e.g. models, vector_stores) are saved together with the RAG chunks
kv_default:
type: kv_sqlite
db_path: ${env.KV_STORE_PATH:=~/.llama/storage/rag/kv_store.db}
sql_default:
type: sql_sqlite
db_path: ${env.SQL_STORE_PATH:=~/.llama/storage/sql_store.db}

stores:
metadata:
namespace: registry
backend: kv_default

inference:
table_name: inference_store
backend: sql_default
max_write_queue_size: 10000
num_writers: 4

conversations:
table_name: openai_conversations
backend: sql_default

prompts:
namespace: prompts
backend: kv_default

metadata_store:
type: sqlite
db_path: ~/.llama/storage/registry.db

inference_store:
type: sqlite
db_path: ~/.llama/storage/inference-store.db

conversations_store:
type: sqlite
db_path: ~/.llama/storage/conversations.db

providers:

inference:
- provider_id: openai
provider_type: remote::openai
config:
api_key: ${env.OPENAI_API_KEY}
allowed_models:
- gpt-4o-mini

- provider_id: sentence-transformers
provider_type: inline::sentence-transformers
config:
allowed_models:
- ${env.EMBEDDING_MODEL_DIR}

files:
- provider_id: meta-reference-files
provider_type: inline::localfs
config:
storage_dir: ~/.llama/storage/files
metadata_store:
table_name: files_metadata
backend: sql_default

safety:
- provider_id: llama-guard
provider_type: inline::llama-guard
config:
excluded_categories: []

scoring:
- provider_id: basic
provider_type: inline::basic
config: {}

- provider_id: llm-as-judge
provider_type: inline::llm-as-judge
config: {}

tool_runtime:
- provider_id: rag-runtime
provider_type: inline::rag-runtime
config: {}

vector_io:
- provider_id: solr-vector
provider_type: remote::solr_vector_io
config:
solr_url: http://localhost:8983/solr
collection_name: portal-rag
vector_field: chunk_vector
content_field: chunk
embedding_dimension: 384
embedding_model: ${env.EMBEDDING_MODEL_DIR}
persistence:
namespace: portal-rag
backend: kv_default

agents:
- provider_id: meta-reference
provider_type: inline::meta-reference
config:
persistence:
agent_state:
namespace: agents_state
backend: kv_default
responses:
table_name: agents_responses
backend: sql_default

batches:
- provider_id: reference
provider_type: inline::reference
config:
kvstore:
namespace: batches_store
backend: kv_default

datasetio:
- provider_id: huggingface
provider_type: remote::huggingface
config:
kvstore:
namespace: huggingface_datasetio
backend: kv_default

- provider_id: localfs
provider_type: inline::localfs
config:
kvstore:
namespace: localfs_datasetio
backend: kv_default

registered_resources:
models: []
models:
- model_id: granite-embedding-30m
model_type: embedding
provider_id: sentence-transformers
provider_model_id: /Users/acoba/.llama/models/granite-embedding-30m-english
metadata:
embedding_dimension: 384
Comment on lines +155 to +157

⚠️ Potential issue | 🟠 Major

Hardcoded local file path will break on other machines.

The provider_model_id uses the absolute path /Users/acoba/.llama/models/granite-embedding-30m-english, which is specific to a single developer's machine. Use an environment variable reference for portability.

🔧 Proposed fix
   models:
     - model_id: granite-embedding-30m
       model_type: embedding
       provider_id: sentence-transformers
-      provider_model_id: /Users/acoba/.llama/models/granite-embedding-30m-english
+      provider_model_id: ${env.EMBEDDING_MODEL_DIR}
       metadata:
         embedding_dimension: 384
🤖 Prompt for AI Agents
In `@run.yaml` around lines 155-157, the provider_model_id is hardcoded to a
local absolute path that will break on other machines. Update run.yaml to read
the model path from an environment variable (e.g., replace the literal
"/Users/acoba/..." with a variable reference like ${MODEL_PATH}), and document
a fallback or an error if the variable is missing, so the provider_model_id
entry uses the env var reference instead of the hardcoded string and other
developers can set MODEL_PATH in their own environments.
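
A related option: run.yaml already leans on llama-stack's ${env.VAR:=default} substitution for its storage paths (KV_STORE_PATH, SQL_STORE_PATH), so the model entry could declare its own fallback rather than failing when the variable is unset. A sketch, assuming the same substitution rules apply to provider_model_id (worth verifying; the default path shown is only a conventional guess):

  models:
  - model_id: granite-embedding-30m
    model_type: embedding
    provider_id: sentence-transformers
    # Falls back to a conventional local path when EMBEDDING_MODEL_DIR is unset;
    # the plain ${env.EMBEDDING_MODEL_DIR} form would instead require the
    # variable at startup (assumed behavior; may vary by llama-stack version).
    provider_model_id: ${env.EMBEDDING_MODEL_DIR:=~/.llama/models/granite-embedding-30m-english}
    metadata:
      embedding_dimension: 384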


shields:
- shield_id: llama-guard
provider_id: llama-guard
provider_shield_id: openai/gpt-4o-mini
vector_dbs: []
datasets: []
scoring_fns: []
benchmarks: []
tool_groups:
- toolgroup_id: builtin::rag # Register the RAG tool
provider_id: rag-runtime
vector_stores:
default_provider_id: faiss
default_embedding_model: # Define the default embedding model for RAG
provider_id: sentence-transformers
model_id: nomic-ai/nomic-embed-text-v1.5
- shield_id: llama-guard
provider_id: llama-guard
provider_shield_id: openai/gpt-4o-mini

vector_stores:
- vector_store_id: portal-rag
provider_id: solr-vector
embedding_model: sentence-transformers//Users/acoba/.llama/models/granite-embedding-30m-english
embedding_dimension: 384
Comment on lines +164 to +168

⚠️ Potential issue | 🟠 Major

Hardcoded local path in the embedding_model configuration.

As with the model registration above, embedding_model uses a hardcoded path that won't work on other machines. The double slash in sentence-transformers//Users/acoba/... also looks wrong; it is an artifact of joining the sentence-transformers/ provider prefix directly onto an absolute path.

🔧 Proposed fix
   vector_stores:
     - vector_store_id: portal-rag
       provider_id: solr-vector
-      embedding_model: sentence-transformers//Users/acoba/.llama/models/granite-embedding-30m-english
+      embedding_model: sentence-transformers/${env.EMBEDDING_MODEL_DIR}
       embedding_dimension: 384
🤖 Prompt for AI Agents
In `@run.yaml` around lines 164-168, the embedding_model entry under the
vector_stores block (vector_store_id "portal-rag", provider_id "solr-vector")
contains a hardcoded local filesystem path and an incorrect double-slash
format. Replace it with a portable value (e.g., a model identifier like
"sentence-transformers/<model-name>" or an environment variable reference) and
remove the extra slash so the string follows the provider's expected format;
making the value configurable rather than an absolute path lets it work across
machines and in CI.
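
Beyond swapping in the env var, registered_resources above already registers this model under model_id granite-embedding-30m, so the vector store could reference the registered id instead of any filesystem path. A sketch, assuming embedding_model accepts the <provider_id>/<model_id> form (an assumption worth checking against the solr_vector_io provider):

  vector_stores:
  - vector_store_id: portal-rag
    provider_id: solr-vector
    # Reference the registered model id rather than embedding a local path
    # (assumes embedding_model takes "<provider_id>/<model_id>").
    embedding_model: sentence-transformers/granite-embedding-30m
    embedding_dimension: 384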

safety:
default_shield_id: llama-guard