{"version":"1.0","workflow_uuid":"5d1acc2e-f42d-4fce-aec7-771506f858ae","workflow_title":"Helicone Cache — Cut LLM Spend with Drop-In Response Caching","install_contract":{"version":"1.0","installReady":false,"title":"Helicone Cache — Cut LLM Spend with Drop-In Response Caching","summary":"Helicone Cache short-circuits identical LLM requests at the proxy. Set the Helicone-Cache-Enabled header, and exact-match responses come back in milliseconds at zero cost.","assetType":"Knowledge","pageUrl":"","sourceUrl":"https://github.com/Helicone","intendedFor":[],"firstActions":[],"agentFirstSteps":[],"targetPaths":[],"verification":[],"startingPoints":[],"example":"","successOutcome":"","boundaries":[],"askUserIf":["the current workspace stack cannot be matched to a safe upstream template","the target path is not the project root, or an existing file should be merged instead of overwritten"]}}