# OpenAI API key and realtime model used for the main session
OPENAI_API_KEY=
MODEL_NAME="gpt-realtime"

# Local vision model (only used with the --local-vision CLI flag)
# By default, vision is handled by gpt-realtime when the camera tool is used
LOCAL_VISION_MODEL=HuggingFaceTB/SmolVLM2-2.2B-Instruct

# Cache directory for the local VLM (only used with the --local-vision CLI flag)
HF_HOME=./cache

# Hugging Face token for accessing datasets/models
HF_TOKEN=

# Selects a specific profile with custom instructions and tools; the profile must be
# placed in profiles/<profile_name>/__init__.py
REACHY_MINI_CUSTOM_PROFILE="example"
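
# Optional usage sketch (an assumption, not project-specific): if the application does
# not load this file automatically, a POSIX shell can export every variable defined
# above before launching it, assuming the file is saved as .env:
#   set -a; . ./.env; set +a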