{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"VLMEvalKit","owner":"open-compass","isFork":false,"description":"Open-source evaluation toolkit of large vision-language models (LVLMs), support ~100 VLMs, 40+ benchmarks","allTopics":["computer-vision","evaluation","pytorch","gemini","openai","vqa","vit","gpt","multi-modal","clip","claude","openai-api","gpt4","large-language-models","llm","chatgpt","llava","qwen","gpt-4v"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":9,"issueCount":23,"starsCount":1032,"forksCount":145,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,73,30,19,15,26,35,17,26,28,23,10,2,1,10,0,8,13,8,9,8,20,32,14,16,8,22,11,5,3,8,26,15,30,36,59,33,33,11,23,33,20,27,27],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-19T19:15:55.530Z"}},{"type":"Public","name":"opencompass","owner":"open-compass","isFork":false,"description":"OpenCompass is an LLM evaluation platform, supporting a wide range of models (Llama3, Mistral, InternLM2,GPT-4,LLaMa2, Qwen,GLM, Claude, etc) over 100+ datasets.","allTopics":["benchmark","evaluation","openai","llm","chatgpt","large-language-model","llama2","llama3"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":25,"issueCount":180,"starsCount":3769,"forksCount":403,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-19T10:12:05.049Z"}},{"type":"Public","name":"CIBench","owner":"open-compass","isFork":false,"description":"Official Repo of \"CIBench: Evaluation of LLMs as Code Interpreter \"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":5,"forksCount":1,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,4,1,3,0,0,1,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-19T04:51:48.990Z"}},{"type":"Public","name":"ANAH","owner":"open-compass","isFork":false,"description":"[ACL 2024] ANAH: Analytical Annotation of Hallucinations in Large Language Models","allTopics":["acl","gpt","llms","hallucination-detection"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":19,"forksCount":1,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,5,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-12T18:57:53.120Z"}},{"type":"Public","name":"GTA","owner":"open-compass","isFork":false,"description":"Official repository for paper \"GTA: A Benchmark for General Tool Agents\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":29,"forksCount":3,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-12T03:23:24.351Z"}},{"type":"Public","name":"DevBench","owner":"open-compass","isFork":false,"description":"A Comprehensive Benchmark for Software Development.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":83,"forksCount":4,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-05-30T13:10:52.870Z"}},{"type":"Public","name":"Ada-LEval","owner":"open-compass","isFork":false,"description":"The official implementation of \"Ada-LEval: Evaluating long-context LLMs 
with length-adaptable benchmarks\"","allTopics":["gpt4","llm","long-context"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":49,"forksCount":2,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-22T09:36:27.993Z"}},{"type":"Public","name":"T-Eval","owner":"open-compass","isFork":false,"description":"[ACL2024] T-Eval: Evaluating Tool Utilization Capability of Large Language Models Step by Step","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":32,"starsCount":209,"forksCount":13,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-04-03T21:05:37.907Z"}},{"type":"Public","name":"human-eval","owner":"open-compass","isFork":true,"description":"Code for the paper \"Evaluating Large Language Models Trained on Code\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":2,"forksCount":332,"license":"MIT License","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-03-14T11:55:53.553Z"}},{"type":"Public","name":"CriticBench","owner":"open-compass","isFork":false,"description":"A comprehensive benchmark for evaluating critique ability of LLMs","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":25,"forksCount":1,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-02-24T01:31:08.418Z"}},{"type":"Public","name":"code-evaluator","owner":"open-compass","isFork":false,"description":"A multi-language code evaluation tool.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":0,"starsCount":17,"forksCount":6,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-01-26T04:12:27.117Z"}},{"type":"Public","name":"evalplus","owner":"open-compass","isFork":true,"description":"EvalPlus for rigourous evaluation of LLM-synthesized code","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":1,"forksCount":101,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-20T08:13:21.996Z"}},{"type":"Public","name":"MixtralKit","owner":"open-compass","isFork":false,"description":"A toolkit for inference and evaluation of 'mixtral-8x7b-32kseqlen' from Mistral AI","allTopics":["moe","mistral","llm"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":12,"starsCount":762,"forksCount":81,"license":"Apache License 2.0","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-12-15T19:10:55.603Z"}},{"type":"Public","name":"LawBench","owner":"open-compass","isFork":false,"description":"Benchmarking Legal Knowledge of Large Language Models","allTopics":["law","benchmark","llm","chatgpt"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":235,"forksCount":33,"license":"Apache License 
2.0","participation":[9,12,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2023-11-13T06:42:45.191Z"}}],"repositoryCount":14,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"open-compass repositories"}