{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"rubra","owner":"rubra-ai","isFork":false,"description":"Open Weight, tool-calling LLMs","allTopics":["tools","ai","ai-agents","large-language-models","function-calling"],"primaryLanguage":{"name":"Makefile","color":"#427819"},"pullRequestCount":2,"issueCount":3,"starsCount":135,"forksCount":19,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,14,6,0,7,73,24,6,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,5,42],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-05T00:59:51.484Z"}},{"type":"Public","name":"tools.cpp","owner":"rubra-ai","isFork":true,"description":"LLM inference in C/C++, further modified for Rubra function calling models","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":8743,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-02T23:06:07.294Z"}},{"type":"Public","name":"rubra-tools","owner":"rubra-ai","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Jupyter Notebook","color":"#DA5B0B"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,10,10,0,1,5,3,0,10,4],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-29T00:49:13.123Z"}},{"type":"Public","name":"llamafile","owner":"rubra-ai","isFork":true,"description":"Distribute and run LLMs with a single file. \nRubra customized version that adds grammar to chat completion api.","allTopics":[],"primaryLanguage":{"name":"C++","color":"#f34b7d"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":835,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-29T00:43:10.368Z"}},{"type":"Public","name":"vllm","owner":"rubra-ai","isFork":true,"description":"A high-throughput and memory-efficient inference and serving engine for LLMs. Extended for Rubra function calling models","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":3135,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-29T00:27:57.980Z"}},{"type":"Public","name":"rubra-embed-benchmark","owner":"rubra-ai","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":0,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-28T02:59:27.543Z"}}],"repositoryCount":6,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"rubra-ai repositories"}