[{"data":1,"prerenderedAt":92},["ShallowReactive",2],{"glossary-en-ai-inference":3},{"id":4,"title":5,"body":6,"description":79,"extension":80,"meta":81,"navigation":87,"path":88,"seo":89,"stem":90,"__hash__":91},"en_glossary/en/glossary/ai-inference.md","What is AI Inference?",{"type":7,"value":8,"toc":70},"minimark",[9,14,21,26,41,45,48],[10,11,13],"h2",{"id":12},"inference","Inference",[15,16,17,20],"p",{},[18,19,13],"strong",{}," is the process where a trained model is put into a live environment to respond to user questions or process data. It is the \"run-time\" phase of AI.",[22,23,25],"h3",{"id":24},"training-vs-inference","Training vs. Inference",[27,28,29,36],"ul",{},[30,31,32,35],"li",{},[18,33,34],{},"Training",": Compute-intensive, takes days/months, uses massive datasets. Teaches the model.",[30,37,38,40],{},[18,39,13],{},": Fast, happens in milliseconds/seconds, processes one input at a time. Uses what the model learned.",[22,42,44],{"id":43},"edge-inference","Edge Inference",[15,46,47],{},"Running inference directly on a local device (Edge AI) rather than in the cloud is a major trend.",[27,49,50,56],{},[30,51,52,55],{},[18,53,54],{},"Benefit",": Reduced latency and increased privacy.",[30,57,58,61,62,69],{},[18,59,60],{},"Future Vision",": We aim to explore Edge Inference capabilities in future versions of our ",[18,63,64],{},[65,66,68],"a",{"href":67},"/en/products/zma-data-acquisition","Data Acquisition products",", allowing them to make intelligent decisions locally even when offline.",{"title":71,"searchDepth":72,"depth":72,"links":73},"",2,[74],{"id":12,"depth":72,"text":13,"children":75},[76,78],{"id":24,"depth":77,"text":25},3,{"id":43,"depth":77,"text":44},"The process of a trained AI model making predictions or decisions based on live data.","md",{"tags":82},[13,83,84,85,86],"AI","Edge Computing","Deployment","Real-time",true,"/en/glossary/ai-inference",{"title":5,"description":79},"en/glossary/ai-inference","U5UTkOqovFzpreoMMLF9eA44zkiHKkGerTJnaeWGbPM",1778229654872]