{"version":"1.0","workflow_uuid":"5a86ab54-470d-11f1-9bc6-00163e2b0d79","workflow_title":"OpenVINO — Optimize and Deploy AI Inference Across Intel Hardware","install_contract":{"version":"1.0","installReady":false,"title":"OpenVINO — Optimize and Deploy AI Inference Across Intel Hardware","summary":"OpenVINO is an open-source toolkit from Intel for optimizing and deploying deep learning models across Intel CPUs, GPUs, and NPUs to deliver maximum performance.","assetType":"Configs","pageUrl":"https://tokrepo.com/en/workflows/asset-5a86ab54","sourceUrl":"https://github.com/openvinotoolkit/openvino","intendedFor":[],"firstActions":[],"agentFirstSteps":[],"targetPaths":[],"verification":[],"startingPoints":[],"example":"","successOutcome":"","boundaries":[],"askUserIf":["the current workspace stack cannot be matched to a safe upstream template","the target path is not the project root, or an existing file should be merged instead of overwritten"]}}