{"id":"npcpy","name":"npcpy","homepage":"https://github.com/NPC-Worldwide/npcpy","repo_url":"https://github.com/NPC-Worldwide/npcpy","category":"ai-framework","subcategories":["ai-agents","multi-agent","nlp","knowledge-graphs"],"tags":["python","agents","multi-agent","nlp","knowledge-graphs","fine-tuning","multimodal","ollama","mcp-client","mit"],"what_it_does":"A Python library for building NLP applications, multimodal AI agents, and multi-agent teams with orchestration, tool calling, Jinx workflow pipelines, knowledge graph construction, and fine-tuning support across Ollama, OpenAI, Anthropic, Gemini, and DeepSeek.","use_cases":["Building individual AI agents with custom personas, directives, and tool-calling capabilities","Orchestrating multi-agent teams with a leader agent coordinating specialized sub-agents","Creating multi-step prompt pipelines (Jinx workflows) with Jinja templating for reproducible tasks","Constructing and evolving knowledge graphs from unstructured text data","Experimenting with fine-tuning via supervised learning, reinforcement learning, or genetic algorithms"],"not_for":["Teams wanting a managed, hosted agent framework (npcpy is a local Python library)","Simple single-LLM API wrapper use cases (overkill complexity)","Production deployments requiring enterprise SLAs or vendor support"],"best_when":"When you need a flexible Python framework to build, experiment with, and orchestrate multi-agent systems across multiple LLM providers, especially for research or complex automation.","avoid_when":"When you need a no-code or low-code agent builder, or when your team is not comfortable with Python and prompt engineering.","alternatives":["langchain","autogen","crewai","llamaindex","pydantic-ai"],"af_score":53.8,"security_score":55.0,"reliability_score":null,"package_type":"mcp_server","discovery_source":["github"],"priority":"low","status":"evaluated","version_evaluated":"latest","last_evaluated":"2026-03-01T09:50:06.012699+00:00","performance":{"latency_p50_ms":null,"latency_p99_ms":null,"uptime_sla_percent":null,"rate_limits":null,"data_source":"llm_estimated","measured_on":null}}