Quick Start
This guide will help you get started with AIMon. Before you begin, make sure you have AIMon installed; if not, follow the installation guide.
Using AIMon is straightforward. First, set the AIMON_API_KEY
environment variable to your API key.
Here is an example using the synchronous detect decorator:
- Python
from aimon import Detect
# See analyze_prod for the asynchronous version
# that can be used for continuous monitoring
detect = Detect(
    values_returned=['context', 'generated_text'],
    config={"hallucination": {"detector_name": "default"}}
)

@detect
def my_llm_app(context, query):
    # The returned values must match the names in values_returned, in order
    generated_text = my_llm_model(context, query)
    return context, generated_text
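You can then call the decorated function as you normally would; the decorator also returns AIMon's detection result alongside the function's original return values. The snippet below is a minimal sketch: my_llm_model stands in for your own model call, and the exact shape of the returned detection result should be treated as illustrative.

# Call the decorated application as usual; the AIMon detection
# result is returned along with the function's own return values.
context = "Acme Corp reported revenue of $10M in Q3."
query = "What was Acme Corp's revenue in Q3?"
context_out, generated_text, aimon_result = my_llm_app(context, query)
# Inspect the hallucination detection output
print(aimon_result)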
- TypeScript
import Client from "aimon";
// LangChain is used here to split and summarize the source text;
// import paths may vary across LangChain versions.
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
import { OpenAI } from "@langchain/openai";
import { loadSummarizationChain } from "langchain/chains";

// Read API keys from environment variables
const openaiApiKey = process.env.OPENAI_API_KEY;

const aimon_client = new Client({
  authHeader: `Bearer ${process.env.AIMON_API_KEY}`,
});
const detectMetrics = async (sourceText: string) => {
  // Split the source text into chunks
  const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 });
  const docs = await textSplitter.createDocuments([sourceText]);
  const contextDocs = docs.map((doc) => doc.pageContent);

  // Summarize the chunks with an LLM
  const llm = new OpenAI({ temperature: 0, openAIApiKey: openaiApiKey });
  const chain = loadSummarizationChain(llm, { type: "map_reduce" });
  const output = await chain.invoke({
    input_documents: docs,
  });

  // Configure the hallucination detector
  const hallConfig = {
    hallucination: {
      detector_name: "default",
    },
  };

  // Check the quality of the generated output using AIMon
  const detectParams: Client.InferenceDetectParams.Body[] = [
    {
      context: contextDocs,
      generated_text: output.text,
      config: hallConfig,
    },
  ];

  // Call the AIMon detection API
  const aimonResponse: Client.InferenceDetectResponse =
    await aimon_client.inference.detect(detectParams);

  return aimonResponse;
};
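You can then call detectMetrics on any source text and inspect the returned detection results. The snippet below is a minimal sketch, assuming an ES module context where top-level await is available; the source text is a placeholder.

// Example usage (placeholder source text)
const response = await detectMetrics(
  "AIMon helps detect hallucinations and other quality issues in LLM outputs."
);
console.log(response); // hallucination detection results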