{"payload":{"pageCount":8,"repositories":[{"type":"Public","name":"mimicgen","owner":"NVlabs","isFork":false,"description":"This code corresponds to simulation environments used as part of the MimicGen project.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":1,"starsCount":286,"forksCount":45,"license":"Other","participation":[0,0,0,1,0,6,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,12,0,0,0,0,0,0,2,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-19T19:22:29.706Z"}},{"type":"Public","name":"EAGLE","owner":"NVlabs","isFork":false,"description":"EAGLE: Exploring The Design Space for Multimodal LLMs with Mixture of Encoders","allTopics":["demo","eagle","llama","lmm","nvdia","huggingface","gpt4","large-language-models","llm","mllm","llava","lvlm","llama3"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":9,"starsCount":405,"forksCount":26,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,7,0,0,0,11,32,20,7],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-19T01:24:14.549Z"}},{"type":"Public","name":"VILA","owner":"NVlabs","isFork":false,"description":"VILA - a multi-image visual language model with training, inference and evaluation recipe, deployable from cloud to edge (Jetson Orin and laptops)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":48,"starsCount":1799,"forksCount":142,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,3,0,0,2,1,2,3,6,0,0,1,0,0,11,4,0,2,1],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-17T20:26:51.579Z"}},{"type":"Public","name":"earth2grid","owner":"NVlabs","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":3,"starsCount":10,"forksCount":0,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,4,0,5,6,4,15,6],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-19T17:47:26.800Z"}},{"type":"Public","name":"RADIO","owner":"NVlabs","isFork":false,"description":"Official repository for \"AM-RADIO: Reduce All Domains Into One\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":18,"starsCount":611,"forksCount":23,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,11,6,2,2,2,0,2,2,1,5,1,0,3,1,1,5,2,0,0,9,8,7,0,0,0,0,4,0,1,0,4,1,6,0,0,0,0,0,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-16T23:08:06.033Z"}},{"type":"Public","name":"gbrl_sb3","owner":"NVlabs","isFork":false,"description":"GBRL-based Actor-Critic algorithms implemented in stable-baselines3","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":20,"forksCount":2,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,6,0,0,1,0,3,4,0,2,0,0,1,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-17T08:29:51.038Z"}},{"type":"Public","name":"acronym","owner":"NVlabs","isFork":false,"description":"This repository contains a sample of the grasping dataset and tools to visualize grasps, generate random scenes, and render observations. 
The two sample files are in the HDF5 format.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":10,"starsCount":108,"forksCount":25,"license":"MIT License","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-09T19:09:25.126Z"}},{"type":"Public","name":"MambaVision","owner":"NVlabs","isFork":false,"description":"Official PyTorch Implementation of MambaVision: A Hybrid Mamba-Transformer Vision Backbone","allTopics":["deep-learning","transformers","image-classification","mamba","visual-recognition","self-attention","hybrid-models","huggingface-transformers","vision-transformer","foundation-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":9,"starsCount":711,"forksCount":41,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,25,4,1,1,0,2,0,4,2,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-09T14:25:16.660Z"}},{"type":"Public","name":"nvTorchCam","owner":"NVlabs","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":39,"forksCount":2,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,0,0,1,0,0,0,0,5,5,0,1,3,0,0,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-08T16:27:37.950Z"}},{"type":"Public","name":"sds-complete","owner":"NVlabs","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":29,"forksCount":2,"license":null,"participation":[0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,0,0,0,0,0,0,1,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-09-08T12:07:44.320Z"}},{"type":"Public","name":"Deep_Object_Pose","owner":"NVlabs","isFork":false,"description":"Deep Object Pose Estimation (DOPE) – ROS inference (CoRL 2018)","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":8,"issueCount":158,"starsCount":1010,"forksCount":283,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,5,8,3,2,1,0,1,1,1,0,0,0,2,0,2,0,1,0,0,1,0,2,0,1,3,0,5,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-30T23:20:51.783Z"}},{"type":"Public","name":"DoRA","owner":"NVlabs","isFork":false,"description":"[ICML2024 (Oral)] Official PyTorch implementation of DoRA: Weight-Decomposed Low-Rank 
Adaptation","allTopics":["deep-neural-networks","deep-learning","lora","commonsense-reasoning","vision-and-language","large-language-models","parameter-efficient-tuning","instruction-tuning","large-vision-language-models","parameter-efficient-fine-tuning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":8,"starsCount":571,"forksCount":32,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,16,6,4,3,1,7,0,1,1,0,1,1,0,0,0,0,0,0,0,3,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-30T04:19:09.330Z"}},{"type":"Public","name":"LITA","owner":"NVlabs","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":3,"starsCount":131,"forksCount":9,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,6,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-29T16:12:24.728Z"}},{"type":"Public","name":"diffstack","owner":"NVlabs","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":92,"forksCount":8,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-26T12:36:36.391Z"}},{"type":"Public","name":"CF-3DGS","owner":"NVlabs","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":2,"issueCount":29,"starsCount":312,"forksCount":31,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,1,0,1,0,0,0,0,0,1,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-22T07:17:07.484Z"}},{"type":"Public","name":"IPA","owner":"NVlabs","isFork":false,"description":"Interconnect Prototyping Assistant (IPA) is an interconnect modeling and generation framework built atop [MatchLib] (https://github.com/NVlabs/matchlib) and [Connections] (https://github.com/hlslibs/matchlib_connections) libraries.","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":11,"forksCount":1,"license":"Apache License 2.0","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-20T22:30:34.671Z"}},{"type":"Public","name":"sionna","owner":"NVlabs","isFork":false,"description":"Sionna: An Open-Source Library for Next-Generation Physical Layer 
Research","allTopics":["open-source","machine-learning","deep-learning","reproducible-research","gpu-acceleration","communications","5g","6g","link-level-simulation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":7,"issueCount":18,"starsCount":737,"forksCount":211,"license":"Other","participation":[0,0,0,0,0,0,0,2,0,2,0,3,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,1,3,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-19T19:48:06.209Z"}},{"type":"Public","name":"traffic-behavior-simulation","owner":"NVlabs","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":5,"starsCount":169,"forksCount":24,"license":"Other","participation":[0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-19T17:01:22.755Z"}},{"type":"Public","name":"DRAIL","owner":"NVlabs","isFork":false,"description":"The Official PyTorch implementation of DRAIL","allTopics":["reinforcement-learning","robotics","imitation-learning","adversarial-machine-learning","diffusion-models","generative-adversarial-imitation-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":8,"forksCount":1,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-19T05:39:13.714Z"}},{"type":"Public","name":"FoundationPose","owner":"NVlabs","isFork":false,"description":"[CVPR 2024 Highlight] FoundationPose: Unified 6D Pose Estimation and Tracking of Novel Objects","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":5,"issueCount":40,"starsCount":1351,"forksCount":174,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,9,2,1,0,2,1,2,1,0,2,2,0,2,0,0,0,1,0,0,0,3,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-18T05:24:06.537Z"}},{"type":"Public","name":"M2T2","owner":"NVlabs","isFork":false,"description":"M2T2: Multi-Task Masked Transformer for Object-centric Pick and Plac","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":1,"issueCount":4,"starsCount":28,"forksCount":2,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-14T23:48:39.300Z"}},{"type":"Public","name":"PerAda","owner":"NVlabs","isFork":false,"description":"Repo for the paper: PerAda: Parameter-Efficient Federated Learning Personalization with Generalization Guarantees (CVPR 2024)","allTopics":["adapter","personalization","knowledge-distillation","federated-learning"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":7,"forksCount":0,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-14T09:10:09.489Z"}},{"type":"Public","name":"curobo","owner":"NVlabs","isFork":false,"description":"CUDA Accelerated Robot 
Library","allTopics":["robotics","cuda","motion-planning","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":6,"issueCount":26,"starsCount":733,"forksCount":109,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-13T12:15:50.375Z"}},{"type":"Public","name":"SegFormer","owner":"NVlabs","isFork":false,"description":"Official PyTorch implementation of SegFormer","allTopics":["transformer","cityscapes","ade20k","semantic-segmentation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":98,"starsCount":2487,"forksCount":347,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-02T15:50:33.087Z"}},{"type":"Public","name":"InstantSplat","owner":"NVlabs","isFork":false,"description":"InstantSplat: Sparse-view SfM-free Gaussian Splatting in Seconds","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":14,"starsCount":699,"forksCount":33,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,11,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-08-02T05:22:23.049Z"}},{"type":"Public","name":"OmniDrive","owner":"NVlabs","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":9,"starsCount":196,"forksCount":7,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-28T01:02:52.251Z"}},{"type":"Public","name":"MCPNet","owner":"NVlabs","isFork":false,"description":"[CVPR 2024] Official Repository for MCPNet: An Interpretable Classifier via Multi-Level Concept Prototypes","allTopics":["deep-neural-networks","deep-learning","explainable-ai","interpretable-machine-learning","prototype-based-models"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":1,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-08T09:40:21.785Z"}},{"type":"Public","name":"ODISE","owner":"NVlabs","isFork":false,"description":"Official PyTorch implementation of ODISE: Open-Vocabulary Panoptic Segmentation with Text-to-Image Diffusion Models [CVPR 2023 Highlight]","allTopics":["semantic-segmentation","zero-shot-learning","panoptic-segmentation","open-world-classification","diffusion-models","text-image-retrieval","open-vocabulary","open-vocabulary-semantic-segmentation","open-world-object-detection","open-vocabulary-segmentation","deep-learning","pytorch","instance-segmentation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":31,"starsCount":846,"forksCount":48,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-06T07:52:49.504Z"}},{"type":"Public","name":"stylegan2-ada","owner":"NVlabs","isFork":false,"description":"StyleGAN2 with adaptive discriminator augmentation (ADA) - Official TensorFlow 
implementation","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":4,"issueCount":73,"starsCount":1797,"forksCount":501,"license":"Other","participation":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-07-02T14:00:16.336Z"}},{"type":"Public","name":"Forecasting-Model-Search","owner":"NVlabs","isFork":false,"description":"A system for automating selection and optimization of pre-trained models from the TAO Model Zoo","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":19,"forksCount":3,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2024-06-28T23:07:02.133Z"}}],"repositoryCount":212,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"NVlabs repositories"}