From dbb633e2558eb2e21293cf98e58fae390a61013c Mon Sep 17 00:00:00 2001 From: Dmitry Ryumin Date: Tue, 30 Jan 2024 20:56:12 +0300 Subject: [PATCH] Update files --- ...-from-a-single-image-and-shape-from-x.json | 68 +++++++ json_data/3d-from-multi-view-and-sensors.json | 173 ++++++++++++++++++ .../3d-shape-modeling-and-processing.json | 46 +++++ json_data/action-and-event-understanding.json | 30 +++ json_data/adversarial-attack-and-defense.json | 53 ++++++ json_data/biometrics.json | 9 + json_data/computational-imaging.json | 37 ++++ json_data/computer-vision-theory.json | 9 + json_data/datasets-and-evaluation.json | 53 ++++++ ...learning-architectures-and-techniques.json | 45 +++++ .../document-analysis-and-understanding.json | 13 ++ json_data/efficient-and-scalable-vision.json | 63 +++++++ ...odied-vision-active-agents-simulation.json | 15 ++ json_data/explainable-ai-for-cv.json | 21 +++ json_data/faces-and-gestures.json | 45 +++++ ...transparency-accountability-in-vision.json | 41 +++++ json_data/first-person-egocentric-vision.json | 7 + json_data/generative-ai.json | 24 +++ json_data/geometric-deep-learning.json | 8 + .../human-in-the-loop-computer-vision.json | 6 + json_data/human-poseshape-estimation.json | 47 +++++ json_data/humans-3d-modeling-and-driving.json | 12 ++ json_data/image-and-video-forensics.json | 11 ++ json_data/image-and-video-synthesis.json | 135 ++++++++++++++ .../low-level-and-physics-based-vision.json | 115 ++++++++++++ json_data/low-level-vision-and-theory.json | 12 ++ json_data/machine-learning-and-dataset.json | 12 ++ ...ine-learning-other-than-deep-learning.json | 11 ++ ...and-biological-vision-cell-microscopy.json | 40 ++++ ...tion-estimation-matching-and-tracking.json | 59 ++++++ json_data/multimodal-learning.json | 30 +++ .../navigation-and-autonomous-driving.json | 51 ++++++ json_data/neural-generative-models.json | 34 ++++ .../object-pose-estimation-and-tracking.json | 16 ++ .../photogrammetry-and-remote-sensing.json | 11 ++ 
...-security-fairness-and-explainability.json | 8 + json_data/recognition-categorization.json | 50 +++++ json_data/recognition-detection.json | 73 ++++++++ json_data/recognition-retrieval.json | 31 ++++ ...ition-segmentation-and-shape-analysis.json | 12 ++ json_data/representation-learning.json | 40 ++++ .../scene-analysis-and-understanding.json | 40 ++++ ...mentation-grouping-and-shape-analysis.json | 72 ++++++++ ...self--semi--and-unsupervised-learning.json | 12 ++ ...lf--semi--meta--unsupervised-learning.json | 67 +++++++ ...nsfer-low-shot-and-continual-learning.json | 12 ++ ...low-shot-continual-long-tail-learning.json | 110 +++++++++++ .../video-analysis-and-understanding.json | 51 ++++++ json_data/vision-and-audio.json | 12 ++ json_data/vision-and-graphics.json | 22 +++ json_data/vision-and-language.json | 127 +++++++++++++ json_data/vision-and-robotics.json | 11 ++ .../vision-applications-and-systems.json | 36 ++++ json_data/vision-graphics-and-robotics.json | 8 + ...uctive-priors-for-data-efficient-dl-w.json | 17 ++ ...ge-on-deepfake-analysis-and-detection.json | 12 ++ ...-in-plant-phenotyping-and-agriculture.json | 25 +++ ...w-on-new-ideas-in-vision-transformers.json | 18 ++ ...ion-learning-with-very-limited-images.json | 20 ++ ...phs-and-graph-representation-learning.json | 10 + json_data/w-to-nerf-or-not-to-nerf.json | 2 + .../w-uncertainty-estimation-for-cv.json | 14 ++ ...-next-in-multimodal-foundation-models.json | 9 + 63 files changed, 2283 insertions(+) diff --git a/json_data/3d-from-a-single-image-and-shape-from-x.json b/json_data/3d-from-a-single-image-and-shape-from-x.json index d5696a2..6e352ea 100644 --- a/json_data/3d-from-a-single-image-and-shape-from-x.json +++ b/json_data/3d-from-a-single-image-and-shape-from-x.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Aggregating_Feature_Point_Cloud_for_Depth_Completion_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Coordinate_Transformer_Achieving_Single-stage_Multi-person_Mesh_Recovery_from_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10334", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yasarla_MAMo_Leveraging_Memory_and_Attention_for_Monocular_Video_Depth_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14336", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yasarla_MAMo_Leveraging_Memory_and_Attention_for_Monocular_Video_Depth_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Creative_Birds_Self-Supervised_Single-View_3D_Style_Transfer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14127", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bai_Dynamic_PlenOctree_for_Adaptive_Sampling_Refinement_in_Explicit_NeRF_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15333", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_CORE_Co-planarity_Regularized_Monocular_Geometry_Estimation_with_Weak_Supervision_ICCV_2023_paper.pdf", 
"paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Papantoniou_Relightify_Relightable_3D_Faces_from_a_Single_Image_via_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.06077", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_GLA-GCN_Global-local_Adaptive_Graph_Convolutional_Network_for_3D_Human_Pose_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.05853", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Calibrating_Panoramic_Depth_Estimation_for_Practical_Localization_and_Mapping_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14005", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wewer_SimNP_Learning_Self-Similarity_Priors_Between_Neural_Points_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03809", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_AGG-Net_Attention_Guided_Gated-Convolutional_Network_for_Depth_Image_Completion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01624", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Szymanowicz_Viewset_Diffusion_0-Image-Conditioned_3D_Generative_Models_from_2D_Data_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.07881", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": 
null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_CVSformer_Cross-View_Synthesis_Transformer_for_Semantic_Scene_Completion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07938", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Di_U-RED_Unsupervised_3D_Shape_Retrieval_and_Deformation_for_Partial_Point_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06383", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Single_Depth-image_3D_Reflection_Symmetry_and_Shape_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Saunders_Self-supervised_Monocular_Depth_Estimation_Lets_Talk_About_The_Weather_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08357", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bokhovkin_Mesh2Tex_Generating_Mesh_Textures_from_Image_Queries_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.05868", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Sketch_and_Text_Guided_Diffusion_Model_for_Colored_Point_Cloud_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.02874", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lyu_Learning_a_Room_with_the_Occ-SDF_Hybrid_Signed_Distance_Function_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2303.09152", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Robust_Geometry-Preserving_Depth_Estimation_Using_Differentiable_Rendering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.09724", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_FeatureNeRF_Learning_Generalizable_NeRFs_by_Distilling_Foundation_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12786", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_One-shot_Implicit_Animatable_Avatars_with_Model-based_Priors_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.02469", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_VeRi3D_Generative_Vertex-based_Radiance_Fields_for_3D_Controllable_Human_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04800", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Diffuse3D_Wide-Angle_3D_Photography_via_Bilateral_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dang_AutoSynth_Learning_to_Generate_3D_Training_Data_for_Object_Point_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11170", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Body_Knowledge_and_Uncertainty_Modeling_for_Monocular_3D_Human_Body_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.00799", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Accurate_3D_Face_Reconstruction_with_Facial_Component_Tokens_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yin_Metric3D_Towards_Zero-shot_Metric_3D_Prediction_from_A_Single_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10984", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zuo_Reconstructing_Interacting_Hands_with_Interaction_Prior_from_Monocular_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14082", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_SparseNeRF_Distilling_Depth_Ranking_for_Few-shot_Novel_View_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16196", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Beyond_the_Limitation_of_Monocular_3D_Detector_via_Knowledge_Distillation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chai_HiFace_High-Fidelity_3D_Face_Reconstruction_by_Learning_Static_and_Dynamic_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2303.11225", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Animal3D_A_Comprehensive_Dataset_of_3D_Animal_Pose_and_Shape_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11737", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_JOTR_3D_Joint_Contrastive_Learning_with_Transformers_for_Occluded_Human_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16377", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_D-IF_Uncertainty-aware_Human_Digitization_via_Implicit_Distribution_Field_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08857", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_3D_Distillation_Improving_Self-Supervised_Monocular_Depth_Estimation_on_Reflective_Surfaces_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_DeformToon3D_Deformable_Neural_Radiance_Fields_for_3D_Toonification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04410", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_MonoDETR_Depth-guided_Transformer_for_Monocular_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2203.13310", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chan_ReLeaPS__Reinforcement_Learning-based_Illumination_Planning_for_Generalized_Photometric_Stereo_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Vavilala_Convex_Decomposition_of_Indoor_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.04246", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Irshad_NeO_360_Neural_Fields_for_Sparse_View_Synthesis_of_Outdoor_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12967", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_UrbanGIRAFFE_Representing_Urban_Scenes_as_Compositional_Generative_Neural_Feature_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14167", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lan_Efficient_Converted_Spiking_Neural_Network_for_3D_and_2D_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Foo_Distribution-Aligned_Diffusion_for_Human_Mesh_Recovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13369", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guizilini_Towards_Zero-Shot_Scale-Aware_Monocular_Depth_Estimation_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2306.17253", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Costanzino_Learning_Depth_Estimation_for_Transparent_and_Mirror_Surfaces_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15052", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Uni-3D_A_Universal_Model_for_Panoptic_3D_Scene_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_3D_VR_Sketch_Guided_3D_Shape_Prototyping_and_Exploration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.10830", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shao_Transparent_Shape_from_a_Single_View_Polarization_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2204.06331", @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiong_Get3DHuman_Lifting_StyleGAN-Human_into_a_3D_Generative_Model_Using_Pixel-Aligned_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.01162", @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/cvlab/zero123-live", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Zero-1-to-3_Zero-shot_One_Image_to_3D_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11328", @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_FrozenRecon_Pose-free_3D_Scene_Reconstruction_with_Frozen_Depth_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05733", @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Arshad_LIST_Learning_Implicitly_from_Spatial_Transformers_for_Single-View_3D_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12194", @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_3DMiner_Discovering_Shapes_from_Large-Scale_Unannotated_Image_Datasets_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.19188", @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_Nonrigid_Object_Contact_Estimation_With_Regional_Unwrapping_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14074", @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_SHERF_Generalizable_Human_NeRF_from_a_Single_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12791", @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Full-Body_Articulated_Human-Object_Interaction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.10621", @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_PlaneRecTR_Unified_Query_Learning_for_3D_Plane_Recovery_from_a_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2307.13756", @@ -1485,6 +1544,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_SceneRF_Self-Supervised_Monocular_3D_Scene_Reconstruction_with_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.02501", @@ -1510,6 +1570,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_3D-Aware_Neural_Body_Fitting_for_Occlusion_Robust_3D_Human_Pose_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10123", @@ -1535,6 +1596,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Two-in-One_Depth_Bridging_the_Gap_Between_Monocular_and_Binocular_Self-Supervised_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.00933", @@ -1560,6 +1622,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_LRRU_Long-short_Range_Recurrent_Updating_Networks_for_Depth_Completion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.08956", @@ -1585,6 +1648,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_OccFormer_Dual-path_Transformer_for_Vision-based_3D_Semantic_Occupancy_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.05316", @@ -1610,6 +1674,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_CHORD_Category-level_Hand-held_Object_Reconstruction_via_Shape_Deformation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10574", @@ -1635,6 +1700,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + 
"kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yao_NDC-Scene_Boost_Monocular_3D_Semantic_Scene_Completion_in_Normalized_Device_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14616", @@ -1660,6 +1726,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Neural_Video_Depth_Stabilizer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08695", @@ -1685,6 +1752,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_DiLiGenT-Pi_Photometric_Stereo_for_Planar_Surfaces_with_Rich_Details_-_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/3d-from-multi-view-and-sensors.json b/json_data/3d-from-multi-view-and-sensors.json index bf58bf2..27d10dd 100644 --- a/json_data/3d-from-multi-view-and-sensors.json +++ b/json_data/3d-from-multi-view-and-sensors.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Multi-Modal_Neural_Radiance_Field_for_Monocular_Dense_SLAM_with_a_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14383", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yeshwanth_ScanNet_A_High-Fidelity_Dataset_of_3D_Indoor_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11417", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_Translating_Images_to_Road_Network_A_Non-Autoregressive_Sequence-to-Sequence_Approach_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, 
"gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_Doppelgangers_Learning_to_Disambiguate_Images_of_Similar_Structures_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02420", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mai_EgoLoc_Revisiting_3D_Object_Localization_from_Egocentric_Videos_with_Visual_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.06969", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_ClothPose_A_Real-world_Benchmark_for_Visual_Analysis_of_Garment_Pose_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_EMR-MSF_Self-Supervised_Recurrent_Monocular_Scene_Flow_Exploiting_Ego-Motion_Rigidity_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_ENVIDR_Implicit_Differentiable_Renderer_with_Neural_Environment_Lighting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13022", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Learning_a_More_Continuous_Zero_Level_Set_in_Unsigned_Distance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11441", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Cong_Enhancing_NeRF_akin_to_Enhancing_LLMs_Generalizable_NeRF_Transformer_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11793", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_MatrixCity_A_Large-scale_City_Dataset_for_City-scale_Neural_Rendering_and_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Schmied_R3D3_Dense_3D_Reconstruction_of_Dynamic_Scenes_from_Multiple_Cameras_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14713", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_ClimateNeRF_Extreme_Weather_Synthesis_in_Neural_Radiance_Field_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.13226", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiang_Rendering_Humans_from_Object-Occluded_Monocular_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04622", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiangli_AssetField_Assets_Mining_and_Reconfiguration_in_Ground_Feature_Plane_Representation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13953", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_PETRv2_A_Unified_Framework_for_3D_Perception_from_Multi-Camera_Images_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2206.01256", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kaneko_MIMO-NeRF_Fast_Neural_Rendering_with_Multi-input_Multi-output_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_Adaptive_Positional_Encoding_for_Bundle-Adjusting_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_NeuS2_Fast_Learning_of_Neural_Implicit_Surfaces_for_Multi-view_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.05231", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Learning_from_Semantic_Alignment_between_Unpaired_Multiviews_for_Egocentric_Video_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11489", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jing_Uncertainty_Guided_Adaptive_Warping_for_Robust_and_Efficient_Stereo_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14071", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bratelund_Compatibility_of_Fundamental_Matrices_for_Complete_Viewing_Graphs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10658", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, 
"demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_ProtoTransfer_Cross-Modal_Prototype_Transfer_for_Point_Cloud_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_SA-BEV_Generating_Semantic-Aware_Birds-Eye-View_Feature_for_Multi-view_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11477", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Song_GraphAlign_Enhancing_Accurate_Feature_Alignment_by_Graph_matching_for_Multi-Modal_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Terekhov_Tangent_Sampson_Error_Fast_Approximate_Two-view_Reprojection_Error_for_Central_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Puy_Using_a_Waffle_Iron_for_Automotive_Point_Cloud_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.10100", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hajder_Fast_Globally_Optimal_Surface_Normal_Estimation_from_an_Affine_Correspondence_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Buhler_Preface_A_Data-driven_Volumetric_Prior_for_Few-shot_Ultra_High-resolution_Face_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yi_Canonical_Factors_for_Hybrid_Neural_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.15461", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Center-Based_Decoupled_Point-cloud_Registration_for_6D_Object_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hagemann_Deep_Geometry-Aware_Camera_Self-Calibration_from_Video_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Burgdorfer_V-FUSE_Volumetric_Depth_Map_Fusion_with_Long-Range_Constraints_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08715", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_Consistent_Depth_Prediction_for_Transparent_Object_Reconstruction_from_RGB-D_Camera_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hwang_FaceCLIPNeRF_Text-driven_3D_Face_Manipulation_using_Deformable_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2307.11418", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_HollowNeRF_Pruning_Hashgrid-Based_NeRFs_with_Trainable_Collision_Mitigation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10122", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_ICE-NeRF_Interactive_Color_Editing_of_NeRFs_via_Decomposition-Aware_Weight_Optimization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_FULLER_Unified_Multi-modality_Multi-task_3D_Perception_via_Multi-level_Gradient_Calibration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16617", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shandilya_Neural_Fields_for_Structured_Lighting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_CO-Net_Learning_Multiple_Point_Cloud_Tasks_at_Once_with_A_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Pose-Free_Neural_Radiance_Fields_via_Implicit_Pose_Regularization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.15049", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Pan_TransHuman_A_Transformer-based_Human_Representation_for_Generalizable_Neural_Human_Rendering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12291", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_S-VolSDF_Sparse_Multi-View_Stereo_Regularization_of_Neural_Implicit_Surfaces_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17712", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tian_DPS-Net_Deep_Polarimetric_Stereo_Depth_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shu_3DPPE_3D_Point_Positional_Encoding_for_Transformer-based_Multi-Camera_3D_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.14710", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Deformable_Neural_Radiance_Fields_using_RGB_and_Event_Cameras_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08416", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_NeILF_Inter-Reflectable_Light_Fields_for_Geometry_and_Material_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17147", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Ren_Hierarchical_Prior_Mining_for_Non-local_Multi-View_Stereo_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09758", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Exploring_Object-Centric_Temporal_Modeling_for_Efficient_Multi-View_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11926", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Rojas_Re-ReND_Real-Time_Rendering_of_NeRFs_across_Devices_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08717", @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Learning_Shape_Primitives_via_Implicit_Convexity_Regularization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yin_Geometry-guided_Feature_Learning_and_Fusion_for_Indoor_Scene_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_LiDAR-Camera_Panoptic_Segmentation_via_Geometry-Consistent_and_Semantic-Aware_Alignment_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.01686", @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Ding_PivotNet_Vectorized_Pivot_Learning_for_End-to-end_HD_Map_Construction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16477", @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qian_Sat2Density_Faithful_Density_Learning_from_Satellite-Ground_Image_Pairs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14672", @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lai_Mask-Attention-Free_Transformer_for_3D_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01692", @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_Scene-Aware_Feature_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09949", @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Revisiting_Domain-Adaptive_3D_Object_Detection_by_Reliable_Diverse_and_Class-balanced_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07944", @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_GO-SLAM_Global_Optimization_for_Consistent_3D_Instant_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02436", @@ -1485,6 +1544,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Piedade_BANSAC_A_Dynamic_BAyesian_Network_for_Adaptive_SAmple_Consensus_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2309.08690", @@ -1510,6 +1570,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Rydell_Theoretical_and_Numerical_Analysis_of_3D_Reconstruction_Using_Point_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13593", @@ -1535,6 +1596,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_RealGraph_A_Multiview_Dataset_for_4D_Real-world_Context_Graph_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1560,6 +1622,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiong_CL-MVSNet_Unsupervised_Multi-View_Stereo_with_Dual-Level_Contrastive_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1585,6 +1648,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zong_Temporal_Enhanced_Training_of_Multi-view_3D_Object_Detector_via_Historical_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.00967", @@ -1610,6 +1674,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Object_as_Query_Lifting_Any_2D_Object_Detector_to_3D_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.02364", @@ -1635,6 +1700,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nie_PARTNER_Level_up_the_Polar_Representation_for_LiDAR_3D_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03982", @@ -1660,6 +1726,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Not_Every_Side_Is_Equal_Localization_Uncertainty_Estimation_for_Semi-Supervised_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1685,6 +1752,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mundra_LiveHand_Real-time_and_Photorealistic_Neural_Hand_Rendering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.07672", @@ -1710,6 +1778,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ju_DG-Recon_Depth-Guided_Neural_3D_Scene_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1735,6 +1804,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_SparseBEV_High-Performance_Sparse_3D_Object_Detection_from_Multi-Camera_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09244", @@ -1760,6 +1830,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_Strivec_Sparse_Tri-Vector_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13226", @@ -1785,6 +1856,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pittaluga_LDP-Feat_Image_Features_with_Local_Differential_Privacy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11223", @@ -1810,6 +1882,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_SparseFusion_Fusing_Multi-Modal_Sparse_Representations_for_Multi-Sensor_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2304.14340", @@ -1835,6 +1908,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dhiman_Strata-NeRF__Neural_Radiance_Fields_for_Stratified_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10337", @@ -1860,6 +1934,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_CRN_Camera_Radar_Net_for_Accurate_Robust_Efficient_3D_Perception_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.00670", @@ -1885,6 +1960,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lindenberger_LightGlue_Local_Feature_Matching_at_Light_Speed_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.13643", @@ -1910,6 +1986,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_ExBluRF_Efficient_Radiance_Fields_for_Extreme_Motion_Blurred_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08957", @@ -1935,6 +2012,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Generalized_Differentiable_RANSAC_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.13185", @@ -1960,6 +2038,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_Constraining_Depth_Map_Geometry_for_Multi-View_Stereo_A_Dual-Depth_Approach_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09160", @@ -1985,6 +2064,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Song_Total-Recon_Deformable_Scene_Reconstruction_for_Embodied_View_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.12317", @@ -2010,6 +2090,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Seal-3D_Interactive_Pixel-Level_Editing_for_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15131", @@ -2035,6 +2116,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yuan_PointMBF_A_Multi-scale_Bidirectional_Fusion_Network_for_Unsupervised_RGB-D_Point_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04782", @@ -2060,6 +2142,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ying_PARF_Primitive-Aware_Radiance_Fusion_for_Indoor_Scene_Novel_View_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.17190", @@ -2085,6 +2168,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Rethinking_Point_Cloud_Registration_as_Masking_and_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2110,6 +2194,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Ada3D__Exploiting_the_Spatial_Redundancy_with_Adaptive_Inference_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08209", @@ -2135,6 +2220,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_Delicate_Textured_Mesh_Recovery_from_NeRF_via_Adaptive_Surface_Refinement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.02091", @@ -2160,6 +2246,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_CVRecon_Rethinking_3D_Geometric_Feature_Learning_For_Neural_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.14633", @@ -2185,6 +2272,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_RICO_Regularizing_the_Unobservable_for_Indoor_Compositional_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08605", @@ -2210,6 +2298,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_Multiscale_Representation_for_Real-Time_Anti-Aliasing_Neural_Rendering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.10075", @@ -2235,6 +2324,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lou_ELFNet_Evidential_Local-global_Fusion_for_Stereo_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.00728", @@ -2260,6 +2350,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ngo_GaPro_Box-Supervised_3D_Point_Cloud_Instance_Segmentation_Using_Gaussian_Processes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13251", @@ -2285,6 +2376,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Dal_Cin_Multi-body_Depth_and_Camera_Pose_Estimation_from_Multiple_Views_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2310,6 +2402,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mirzaei_Reference-guided_Controllable_Inpainting_of_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.09677", @@ -2335,6 +2428,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiang_Retro-FPN_Retrospective_Feature_Pyramid_Network_for_Point_Cloud_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09314", @@ -2360,6 +2454,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_GeoMIM_Towards_Better_3D_Knowledge_Transfer_via_Masked_Image_Modeling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11325", @@ -2385,6 +2480,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_OpenOccupancy_A_Large_Scale_Benchmark_for_Surrounding_Semantic_Occupancy_Perception_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.03991", @@ -2410,6 +2506,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Popovic_Surface_Normal_Clustering_for_Implicit_Representation_of_Manhattan_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.01331", @@ -2435,6 +2532,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Choe_Spacetime_Surface_Regularization_for_Neural_Dynamic_Scene_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2460,6 +2558,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_LDL_Line_Distance_Functions_for_Panoramic_Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13989", @@ -2485,6 +2584,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Learning_Neural_Implicit_Surfaces_with_Object-Aware_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2510,6 +2610,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tian_MonoNeRF_Learning_a_Generalizable_Dynamic_Radiance_Field_from_Monocular_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.13056", @@ -2535,6 +2636,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chang_Neural_Radiance_Field_with_LiDAR_maps_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2560,6 +2662,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Deformable_Model-Driven_Neural_Rendering_for_High-Fidelity_3D_Reconstruction_of_Human_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13855", @@ -2585,6 +2688,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guizilini_DeLiRa_Self-Supervised_Depth_Light_and_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02797", @@ -2610,6 
+2714,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lorraine_ATT3D_Amortized_Text-to-3D_Object_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.07349", @@ -2635,6 +2740,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ramazzina_ScatterNeRF_Seeing_Through_Fog_with_Physically-Based_Inverse_Neural_Rendering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.02103", @@ -2660,6 +2766,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Weinzaepfel_CroCo_v2_Improved_Cross-view_Completion_Pre-training_for_Stereo_Matching_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.10408", @@ -2685,6 +2792,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Guiding_Local_Feature_Matching_with_Surface_Curvature_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2710,6 +2818,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_NaviNeRF_NeRF-based_3D_Representation_Disentanglement_by_Latent_Semantic_Navigation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.11342", @@ -2735,6 +2844,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hui_Efficient_LiDAR_Point_Cloud_Oversegmentation_Network_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2760,6 +2870,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Alaniz_Iterative_Superquadric_Recomposition_of_3D_Objects_from_Multiple_Views_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02102", @@ -2785,6 +2896,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_S3IM_Stochastic_Structural_SIMilarity_and_Its_Unreasonable_Effectiveness_for_Neural_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07032", @@ -2810,6 +2922,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Neural-PBIR_Reconstruction_of_Shape_Material_and_Illumination_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.13445", @@ -2835,6 +2948,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Predict_to_Detect_Prediction-guided_3D_Object_Detection_using_Sequential_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.08528", @@ -2860,6 +2974,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_ObjectFusion_Multi-modal_3D_Object_Detection_with_Object-Centric_Fusion_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2885,6 +3000,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sanchez_Domain_Generalization_of_3D_Semantic_Segmentation_in_Autonomous_Driving_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.04245", @@ -2910,6 +3026,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_When_Epipolar_Constraint_Meets_Non-Local_Operators_in_Multi-View_Stereo_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.17218", @@ -2935,6 +3052,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Hierarchical_Point-based_Active_Learning_for_Semi-supervised_Point_Cloud_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11166", @@ -2960,6 +3078,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_UniT3D_A_Unified_Transformer_for_3D_Dense_Captioning_and_Visual_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.00836", @@ -2985,6 +3104,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Warburg_Nerfbusters_Removing_Ghostly_Artifacts_from_Casually_Captured_NeRFs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.10532", @@ -3010,6 +3130,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Clutter_Detection_and_Removal_in_3D_Scenes_with_View-Consistent_Inpainting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.03763", @@ -3035,6 +3156,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Koo_PG-RCNN_Semantic_Surface_Point_Generation_for_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12637", @@ -3060,6 +3182,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Distributed_Bundle_Adjustment_with_Block-Based_Sparse_Matrix_Compression_for_Super_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08383", @@ -3085,6 +3208,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Adaptive_Reordering_Sampler_with_Neurally_Guided_MAGSAC_ICCV_2023_paper.pdf", "paper_arxiv_id": "2111.14093", @@ -3110,6 +3234,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pan_Privacy_Preserving_Localization_via_Coordinate_Permutations_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -3135,6 +3260,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_WaveNeRF_Wavelet-based_Generalizable_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04826", @@ -3160,6 +3286,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_TransIFF_An_Instance-Level_Feature_Fusion_Framework_for_Vehicle-Infrastructure_Cooperative_3D_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -3185,6 +3312,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Density-invariant_Features_for_Distant_Point_Cloud_Registration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09788", @@ -3210,6 +3338,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_UMIFormer_Mining_the_Correlations_between_Similar_Tokens_for_Multi-View_3D_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2302.13987", @@ -3235,6 +3364,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Neural_LiDAR_Fields_for_Novel_View_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.01643", @@ -3260,6 +3390,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Learning_Unified_Decompositional_and_Compositional_NeRF_for_Editable_Novel_View_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.02840", @@ -3285,6 +3416,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Long-Range_Grouping_Transformer_for_Multi-View_3D_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08724", @@ -3310,6 +3442,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_Cross_Modal_Transformer_Towards_Fast_and_Robust_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.01283", @@ -3335,6 +3468,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_KECOR_Kernel_Coding_Rate_Maximization_for_Active_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07942", @@ -3360,6 +3494,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_C2F2NeUS_Cascade_Cost_Frustum_Fusion_for_High_Fidelity_and_Generalizable_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.10003", @@ -3385,6 +3520,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Li_End-to-end_3D_Tracking_with_Decoupled_Queries_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -3410,6 +3546,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_LU-NeRF_Scene_and_Pose_Estimation_by_Synchronizing_Local_Unposed_NeRFs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.05410", @@ -3435,6 +3572,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_GridPull_Towards_Scalability_in_Learning_Implicit_Representations_from_3D_Point_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13175", @@ -3460,6 +3598,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Low_Robust_e-NeRF_NeRF_from_Sparse__Noisy_Events_under_Non-Uniform_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08596", @@ -3485,6 +3624,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zeng_Parameterized_Cost_Volume_for_Stereo_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -3510,6 +3650,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Coordinate_Quantized_Neural_Implicit_Representations_for_Multi-view_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11025", @@ -3535,6 +3676,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_Pixel-Aligned_Recurrent_Queries_for_Multi-View_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.01401", @@ 
-3560,6 +3702,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Optimizing_the_Placement_of_Roadside_LiDARs_for_Autonomous_Driving_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.07247", @@ -3585,6 +3728,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mu_ActorsNeRF_Animatable_Few-shot_Human_Rendering_with_Generalizable_NeRFs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.14401", @@ -3610,6 +3754,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhan_NeRFrac_Neural_Radiance_Fields_through_Refractive_Surface_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -3635,6 +3780,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_CPCM_Contextual_Point_Cloud_Modeling_for_Weakly-supervised_Point_Cloud_Semantic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10316", @@ -3660,6 +3806,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Stier_FineRecon_Depth-aware_Feed-forward_Network_for_Detailed_3D_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01480", @@ -3685,6 +3832,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sandstrom_Point-SLAM_Dense_Neural_Point_Cloud-based_SLAM_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.04278", @@ -3710,6 +3858,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Samet_You_Never_Get_a_Second_Chance_To_Make_a_Good_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.11762", @@ -3735,6 +3884,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kulhanek_Tetra-NeRF_Representing_Neural_Radiance_Fields_Using_Tetrahedra_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.09987", @@ -3760,6 +3910,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bartolomei_Active_Stereo_Without_Pattern_Projector_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.12315", @@ -3785,6 +3936,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_HOSNeRF_Dynamic_Human-Object-Scene_Neural_Radiance_Fields_from_a_Single_Video_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.12281", @@ -3810,6 +3962,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_PlankAssembly_Robust_3D_Reconstruction_from_Three_Orthographic_Views_with_Learnt_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05744", @@ -3835,6 +3988,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Efficient_View_Synthesis_with_Neural_Radiance_Distribution_Field_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11130", @@ -3860,6 +4014,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_Query_Refinement_Transformer_for_3D_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -3885,6 +4040,7 
@@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_TrajectoryFormer_3D_Object_Tracking_Transformer_with_Predictive_Trajectory_Hypotheses_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.05888", @@ -3910,6 +4066,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_NerfAcc_Efficient_Sampling_Accelerates_NeRFs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.04966", @@ -3935,6 +4092,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_NeTONeural_Reconstruction_of_Transparent_Objects_with_Self-Occlusion_Aware_Refraction-Tracing_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11219", @@ -3960,6 +4118,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Text2Tex_Text-driven_Texture_Synthesis_via_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11396", @@ -3985,6 +4144,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Learning_Long-Range_Information_with_Dual-Scale_Transformers_for_Indoor_Scene_Completion_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -4010,6 +4170,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_NeRF-MS_Neural_Radiance_Fields_with_Multi-Sequence_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -4035,6 +4196,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Barron_Zip-NeRF_Anti-Aliased_Grid-Based_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06706", @@ -4060,6 +4222,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Mixed_Neural_Voxels_for_Fast_Multi-view_Video_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.00190", @@ -4085,6 +4248,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_Diffusion-Guided_Reconstruction_of_Everyday_Hand-Object_Interaction_Clips_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05663", @@ -4110,6 +4274,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kerr_LERF_Language_Embedded_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09553", @@ -4135,6 +4300,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Haque_Instruct-NeRF2NeRF_Editing_3D_Scenes_with_Instructions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12789", @@ -4160,6 +4326,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ventura_P1AC_Revisiting_Absolute_Pose_From_a_Single_Affine_Correspondence_ICCV_2023_paper.pdf", "paper_arxiv_id": "2011.08790", @@ -4185,6 +4352,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sklyarova_Neural_Haircut_Prior-Guided_Strand-Based_Hair_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.05872", @@ -4210,6 +4378,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_Tri-MipRF_Tri-Mip_Representation_for_Efficient_Anti-Aliasing_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11335", @@ -4235,6 +4404,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shaban_LiDAR-UDA_Self-ensembling_Through_Time_for_Unsupervised_LiDAR_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.13523", @@ -4260,6 +4430,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Tracking_Everything_Everywhere_All_at_Once_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.05422", @@ -4285,6 +4456,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Khirodkar_Ego-Humans_An_Ego-Centric_3D_Multi-Human_Benchmark_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.16487", @@ -4310,6 +4482,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_Once_Detected_Never_Lost_Surpassing_Human_Performance_in_Offline_LiDAR_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.12315", diff --git a/json_data/3d-shape-modeling-and-processing.json b/json_data/3d-shape-modeling-and-processing.json index fec1ba9..6775ed2 100644 --- a/json_data/3d-shape-modeling-and-processing.json +++ b/json_data/3d-shape-modeling-and-processing.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Li_2D3D-MATR_2D-3D_Matching_Transformer_for_Detection-Free_Registration_Between_Images_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05667", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_Learning_Versatile_3D_Shape_Generation_with_Improved_Auto-regressive_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14700", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Su_CaPhy_Capturing_Physical_Properties_for_Animatable_Human_Avatars_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05925", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zha_Instance-aware_Dynamic_Prompt_Tuning_for_Pre-trained_Point_Cloud_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.07221", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Structure-Aware_Surface_Reconstruction_via_Primitive_Assembly_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hartman_BaRe-ESA_A_Riemannian_Framework_for_Unregistered_Human_Body_Shapes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.13185", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/He_Speech4Mesh_Speech-Assisted_Monocular_3D_Facial_Reconstruction_for_Speech-Driven_3D_Facial_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Learning_Point_Cloud_Completion_without_Complete_Point_Clouds_A_Pose-Aware_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ren_GeoUDF_Surface_Reconstruction_from_3D_Point_Clouds_via_Geometry-guided_Distance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.16762", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mani_SurfsUP_Learning_Fluid_Simulation_for_Novel_Surfaces_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06197", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_DeFormer_Integrating_Transformers_with_Deformable_Models_for_3D_Shape_Abstraction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.12594", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_Neural_Deformable_Models_for_3D_Bi-Ventricular_Heart_Shape_Reconstruction_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07693", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Nakayama_DiffFacto_Controllable_Part-Based_3D_Point_Cloud_Generation_with_Cross_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.01921", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Self-supervised_Learning_of_Implicit_Shape_Representation_with_Dense_Correspondence_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12590", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Novello_Neural_Implicit_Surface_Evolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2201.09636", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_PointDC_Unsupervised_Semantic_Segmentation_of_3D_Point_Clouds_via_Cross-Modal_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Erkoc_HyperDiffusion_Generating_Implicit_Neural_Fields_with_Weight-Space_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17015", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Leveraging_SE3_Equivariance_for_Learning_3D_Geometric_Shape_Assembly_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.06810", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Shuai_DPF-Net_Combining_Explicit_Shape_Priors_in_Deformable_Primitive_Field_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13225", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Sample-adaptive_Augmentation_for_Point_Cloud_Recognition_Against_Real-world_Corruptions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.10431", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tao_3DHacker_Spectrum-based_Decision_Boundary_Generation_for_Hard-label_3D_Point_Cloud_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07546", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cui_P2C_Self-Supervised_Point_Cloud_Completion_from_Single_Partial_Clouds_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14726", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shao_Towards_Multi-Layered_3D_Garments_Animation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.10418", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_AvatarCraft_Transforming_Text_into_Neural_Human_Avatars_with_Parameterized_Shape_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17606", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Song_Blending-NeRF_Text-Driven_Localized_Editing_in_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11974", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_SIRA-PCR_Sim-to-Real_Adaptation_for_3D_Point_Cloud_Registration_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_3D_Semantic_Subspace_Traverser_Empowering_3D_Generative_Model_with_Shape_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14051", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_DMNet_Delaunay_Meshing_Network_for_3D_Shape_Representation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hong_Attention_Discriminant_Sampling_for_Point_Clouds_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/KAIST-Geometric-AI-Lab/salad-demo", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Koo_SALAD_Part-Level_Latent_Diffusion_for_3D_Shape_Generation_and_Manipulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12236", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_MAPConNet_Self-supervised_3D_Pose_Transfer_with_Mesh_and_Point_Contrastive_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.13819", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yi_Invariant_Training_2D-3D_Joint_Hard_Samples_for_Few-Shot_Point_Cloud_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09694", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Levi_EPiC_Ensemble_of_Partial_Point_Clouds_for_Robust_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11419", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_Leveraging_Intrinsic_Properties_for_Non-Rigid_Garment_Alignment_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09519", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Spatially_and_Spectrally_Consistent_Deep_Functional_Maps_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08871", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_SVDFormer_Complementing_Point_Cloud_via_Self-view_Augmentation_and_Self-structure_Dual-generator_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08492", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Batch-based_Model_Registration_for_Fast_3D_Sherd_Reconstruction_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2211.06897", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_Implicit_Autoencoder_for_Point-Cloud_Self-Supervised_Representation_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2201.00785", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_E3Sym_Leveraging_E3_Invariance_for_Unsupervised_3D_Planar_Reflective_Symmetry_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gralnik_Semantify_Simplifying_the_Control_of_3D_Morphable_Models_Using_CLIP_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07415", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Maruani_VoroMesh_Learning_Watertight_Surface_Meshes_with_Voronoi_Diagrams_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14616", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zuo_DG3D_Generating_High_Quality_3D_Textured_Shapes_by_Learning_to_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Corona-Figueroa_Unaligned_2D_to_3D_Translation_with_Conditional_Vector-Quantized_Code_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14152", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": 
null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_Hyperbolic_Chamfer_Distance_for_Point_Cloud_Completion_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mikaeili_SKED_Sketch-guided_Text-based_3D_Editing_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10735", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Babiloni_Adaptive_Spiral_Layers_for_Efficient_3D_Representation_Learning_on_Meshes_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/action-and-event-understanding.json b/json_data/action-and-event-understanding.json index 8f32937..de94836 100644 --- a/json_data/action-and-event-understanding.json +++ b/json_data/action-and-event-understanding.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ghoddoosian_Weakly-Supervised_Action_Segmentation_and_Unseen_Error_Detection_in_Anomalous_Instructional_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Diffusion_Action_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17959", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nugroho_Audio-Visual_Glance_Network_for_Efficient_Video_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09322", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, 
"demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xia_Learning_from_Noisy_Pseudo_Labels_for_Semi-Supervised_Temporal_Action_Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Video_Action_Recognition_with_Attentive_Semantic_Units_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09756", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mao_Masked_Motion_Predictors_are_Strong_3D_Action_Representation_Learners_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07092", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Rachavarapu_Boosting_Positive_Segments_for_Weakly-Supervised_Audio-Visual_Video_Parsing_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Weakly-Supervised_Action_Localization_by_Hierarchically-Structured_Latent_Attention_Modeling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09946", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Few-Shot_Common_Action_Localization_via_Cross-Attentional_Fusion_of_Context_and_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Nakatani_Interaction-aware_Joint_Attention_Estimation_Using_People_Attributes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05382", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_FineDance_A_Fine-grained_Choreography_Dataset_for_3D_Full_Body_Dance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.03741", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhai_SOAR_Scene-debiasing_Open-set_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01265", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Leveraging_Spatio-Temporal_Dependency_for_Skeleton-Based_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.04761", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Cross-Modal_Learning_with_3D_Deformable_Attention_for_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.05638", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiang_Generative_Action_Description_Prompts_for_Skeleton-based_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.05318", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Self-Feedback_DETR_for_Temporal_Action_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2308.10570", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Skip-Plan_Procedure_Planning_in_Instructional_Videos_via_Condensed_Action_Space_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zara_The_Unreasonable_Effectiveness_of_Large_Language-Vision_Models_for_Source-Free_Video_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09139", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Flaborea_Multimodal_Motion_Conditioned_Diffusion_Model_for_Skeleton-based_Video_Anomaly_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07205", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_Video_Anomaly_Detection_via_Sequentially_Learning_Multiple_Pretext_Tasks_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/An_MiniROAD_Minimal_RNN_Framework_for_Online_Action_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bahrami_How_Much_Temporal_Long-Term_Context_is_Needed_for_Action_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11358", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Nag_DiffTAD_Temporal_Action_Detection_with_Proposal_Denoising_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14863", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shah_STEPs_Self-Supervised_Key_Step_Extraction_and_Localization_from_Unlabeled_Procedural_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.00794", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Efficient_Video_Action_Detection_with_Token_Dropout_and_Context_Refinement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.08451", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_FSAR_Federated_Skeleton-based_Action_Recognition_with_Adaptive_Topology_Structure_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.11046", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Exploring_Predicate_Visual_Context_in_Detecting_of_Human-Object_Interactions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06202", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_E2E-LOAD_End-to-End_Long-form_Online_Action_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.07703", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Revisiting_Foreground_and_Background_Separation_in_Weakly-supervised_Temporal_Action_Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Hierarchically_Decomposed_Graph_Convolutional_Networks_for_Skeleton-Based_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.10741", diff --git a/json_data/adversarial-attack-and-defense.json b/json_data/adversarial-attack-and-defense.json index 057ec54..c2149be 100644 --- a/json_data/adversarial-attack-and-defense.json +++ b/json_data/adversarial-attack-and-defense.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Robust_Mixture-of-Expert_Training_for_Convolutional_Neural_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10110", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_Set-level_Guidance_Attack_Boosting_Adversarial_Transferability_of_Vision-Language_Pre-training_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14061", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bansal_CleanCLIP_Mitigating_Data_Poisoning_Attacks_in_Multimodal_Contrastive_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.03323", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Reza_CGBA_Curvature-aware_Geometric_Black-box_Attack_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2308.03163", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Robust_Evaluation_of_Diffusion-Based_Adversarial_Purification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09051", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ge_Advancing_Example_Exploitation_Can_Alleviate_Critical_Challenges_in_Adversarial_Training_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_The_Victim_and_The_Beneficiary_Exploiting_a_Poisoned_Model_to_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sur_TIJO_Trigger_Inversion_with_Joint_Optimization_for_Defending_Multimodal_Backdoored_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03906", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Stolik_SAGA_Spectral_Adversarial_Geometric_Attack_on_3D_Meshes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.13775", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ji_Benchmarking_and_Analyzing_Robust_Point_Cloud_Recognition_Bag_of_Tricks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16361", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Suryanto_ACTIVE_Towards_Highly_Transferable_3D_Physical_Camouflage_for_Universal_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07009", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Frequency-aware_GAN_for_Adversarial_Manipulation_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Breaking_Temporal_Consistency_Generating_Video_Universal_Adversarial_Perturbations_Using_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fang_Tracing_the_Origin_of_Adversarial_Attack_for_Forensic_Investigation_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.01218", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Downstream-agnostic_Adversarial_Examples_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12280", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Su_Hiding_Visual_Information_via_Obfuscating_Adversarial_Perturbations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2209.15304", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_An_Embarrassingly_Simple_Backdoor_Attack_on_Self-supervised_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2210.07346", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Efficient_Decision-based_Black-box_Patch_Attacks_on_Video_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11917", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Suzuki_Adversarial_Finetuning_with_Latent_Representation_Constraint_to_Mitigate_Accuracy-Robustness_Tradeoff_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16454", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bu_Towards_Building_More_Robust_Models_with_Frequency_Bias_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09763", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Does_Physical_Adversarial_Example_Really_Matter_to_Autonomous_Driving_Towards_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11894", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Improving_Generalization_of_Adversarial_Training_via_Robust_Critical_Fine-Tuning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.02533", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Enhancing_Generalization_of_Universal_Adversarial_Perturbation_through_Gradient_Aggregation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06015", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + 
"kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Unified_Adversarial_Patch_for_Cross-Modal_Attacks_in_the_Physical_World_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07859", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_RFLA_A_Stealthy_Reflected_Light_Adversarial_Attack_in_the_Physical_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07653", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Enhancing_Fine-Tuning_Based_Backdoor_Defense_with_Sharpness-Aware_Minimization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.11823", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shum_Conditional_360-degree_Image_Synthesis_for_Immersive_Indoor_Scene_Decoration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09621", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_An_Adaptive_Model_Ensemble_Adversarial_Attack_for_Boosting_Adversarial_Transferability_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.02897", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Mitigating_Adversarial_Vulnerability_through_Causal_Parameter_Estimation_by_Adversarial_Double_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07250", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Qian_LEA2_A_Lightweight_Ensemble_Adversarial_Attack_via_Non-overlapping_Vulnerable_Frequency_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jin_Explaining_Adversarial_Robustness_of_Neural_Networks_from_Clustering_Effect_Perspective_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ding_VertexSerum_Poisoning_Graph_Neural_Networks_for_Link_Inference_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.01469", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Maho_How_to_Choose_your_Best_Allies_for_a_Transferable_Attack_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02312", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Enhancing_Adversarial_Robustness_in_Low-Label_Regime_via_Adaptively_Weighted_Regularization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04061", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_AdvDiffuser_Natural_Adversarial_Example_Synthesis_with_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_FF_Attack_Adversarial_Attack_against_Multiple_Object_Trackers_by_Inducing_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Struppek_Rickrolling_the_Artist_Injecting_Backdoors_into_Text_Encoders_for_Text-to-Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.02408", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_Hard_No-Box_Adversarial_Attack_on_Skeleton-Based_Human_Action_Recognition_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05681", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Structure_Invariant_Transformation_for_better_Adversarial_Transferability_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Beating_Backdoor_Attack_at_Its_Own_Game_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15539", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Transferable_Adversarial_Attack_for_Both_Vision_Transformers_and_Convolutional_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hingun_REAP_A_Large-Scale_Realistic_Adversarial_Patch_Benchmark_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2212.05680", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Multi-Metrics_Adaptively_Identifies_Backdoors_in_Federated_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06601", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Backpropagation_Path_Search_On_Adversarial_Transferability_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07625", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yeo_Rapid_Network_Adaptation_Learning_to_Adapt_Neural_Networks_Using_Test-Time_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.15762", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_One-bit_Flip_is_All_You_Need_When_Bit-flip_Attack_Meets_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07934", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_PolicyCleanse_Backdoor_Detection_and_Mitigation_for_Competitive_Reinforcement_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2202.03609", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ruan_Towards_Viewpoint-Invariant_Visual_Recognition_via_Adversarial_Training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10235", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Fast_Adversarial_Training_with_Smooth_Convergence_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12857", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shejwalkar_The_Perils_of_Learning_From_Unlabeled_Data_Backdoor_Attacks_on_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.00453", @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Boosting_Adversarial_Transferability_via_Gradient_Relevance_Attack_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gan_Towards_Robust_Model_Watermark_via_Reducing_Parametric_Vulnerability_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04777", @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_TRM-UAP_Enhancing_the_Transferability_of_Data-Free_Universal_Adversarial_Perturbation_via_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/biometrics.json b/json_data/biometrics.json index f046efc..64653c8 100644 --- a/json_data/biometrics.json +++ b/json_data/biometrics.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fu_GPGait_Generalized_Pose-based_Gait_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05234", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Shen_RPG-Palm_Realistic_Pseudo-data_Generation_for_Palmprint_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14016", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Learning_Clothing_and_Pose_Invariant_3D_Shape_Representation_for_Long-Term_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10658", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Physics-Augmented_Autoencoder_for_3D_Skeleton-Based_Gait_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Hierarchical_Spatio-Temporal_Representation_Learning_for_Gait_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09856", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Boutros_IDiff-Face_Synthetic-based_Face_Recognition_through_Fizzy_Identity-Conditioned_Diffusion_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shahreza_Template_Inversion_Attack_against_Face_Recognition_Systems_using_3D_Face_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Mi_Privacy-Preserving_Face_Recognition_Using_Random_Frequency_Components_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10461", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Srivatsan_FLIP_Cross-domain_Face_Anti-spoofing_with_Language_Guidance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16649", diff --git a/json_data/computational-imaging.json b/json_data/computational-imaging.json index 3669826..8e6326e 100644 --- a/json_data/computational-imaging.json +++ b/json_data/computational-imaging.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Khan_Tiled_Multiplane_Images_for_Practical_3D_Photography_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14291", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gupta_Eulerian_Single-Photon_Vision_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_ProPainter_Improving_Propagation_and_Transformer_for_Video_Inpainting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03897", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tai_Global_Perception_Based_Autoregressive_Neural_Processes_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_DOLCE_A_Model-Based_Probabilistic_Diffusion_Framework_for_Limited-Angle_CT_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.12340", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_GlowGAN_Unsupervised_Learning_of_HDR_Images_from_LDR_Images_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.12352", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_Score-Based_Diffusion_Models_as_Principled_Priors_for_Inverse_Imaging_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.11751", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fujimura_NLOS-NeuS_Non-line-of-sight_Neural_Implicit_Surface_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12280", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_MEFLUT_Unsupervised_1D_Lookup_Tables_for_Multi-exposure_Image_Fusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11847", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Temporal-Coded_Spiking_Neural_Networks_with_Dynamic_Firing_Threshold_Learning_with_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Enhancing_Non-line-of-sight_Imaging_via_Learnable_Inverse_Kernel_and_Attention_Mechanisms_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lv_Aperture_Diffraction_for_Compact_Snapshot_Spectral_Imaging_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16372", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Park_Content-Aware_Local_GAN_for_Photo-Realistic_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Iskender_RED-PSM_Regularization_by_Denoising_of_Partially_Separable_Models_for_Dynamic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.03483", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bhat_Self-Supervised_Burst_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_Coherent_Event_Guided_Low-Light_Video_Enhancement_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jungerman_Panoramas_from_Photons_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03811", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": 
null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Designing_Phase_Masks_for_Under-Display_Cameras_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Deep_Optics_for_Video_Snapshot_Compressive_Imaging_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shah_TiDy-PSFs_Computational_Imaging_with_Time-Averaged_Dynamic_Point-Spread-Functions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17583", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yao_Generalized_Lightness_Adaptation_with_Channel_Selective_Normalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13783", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qu_Towards_Nonlinear-Motion-Aware_and_Occlusion-Robust_Rolling_Shutter_Correction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.18125", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yadav_FCCNs_Fully_Complex-valued_Convolutional_Networks_using_Complex-valued_Color_Model_and_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Event_Camera_Data_Pre-training_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2301.01928", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Improving_3D_Imaging_with_Pre-Trained_Perpendicular_2D_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08440", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ren_Multiscale_Structure_Guided_Diffusion_for_Image_Deblurring_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.01789", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Generalizing_Event-Based_Motion_Deblurring_in_Real-World_Scenarios_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05932", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hong_On_the_Robustness_of_Normalizing_Flows_for_Inverse_Problems_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.04319", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gutierrez-Barragan_Learned_Compressive_Representations_for_Single-Photon_3D_Imaging_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_Recovering_a_Molecules_3D_Dynamics_from_Liquid-phase_Electron_Microscopy_Movies_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11927", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Niu_NIR-assisted_Video_Enhancement_via_Unpaired_24-hour_Data_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chan_SpinCam_High-Speed_Imaging_via_a_Rotating_Point-Spread_Function_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liao_RecRecNet_Rectangling_Rectified_Wide-Angle_Images_by_Thin-Plate_Spline_Model_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.01661", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Weng_Affective_Image_Filter_Reflecting_Emotions_from_Text_to_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Towards_General_Low-Light_Raw_Noise_Synthesis_and_Modeling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16508", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Unsupervised_Video_Deraining_with_An_Event_Camera_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_LoLep_Single-View_View_Synthesis_with_Locally-Learned_Planes_and_Self-Attention_Occlusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12217", diff --git 
a/json_data/computer-vision-theory.json b/json_data/computer-vision-theory.json index 8b7cc02..c91fc9c 100644 --- a/json_data/computer-vision-theory.json +++ b/json_data/computer-vision-theory.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Min_Environment-Invariant_Curriculum_Relation_Learning_for_Fine-Grained_Scene_Graph_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03282", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_DCPB_Deformable_Convolution_Based_on_the_Poincare_Ball_for_Top-view_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tu_FemtoDet_An_Object_Detection_Baseline_for_Energy_Versus_Performance_Tradeoffs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.06719", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Saratchandran_Curvature-Aware_Training_for_Coordinate_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.08552", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Aiger_Yes_we_CANN_Constrained_Approximate_Nearest_Neighbors_for_Local_Feature-Based_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.09012", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Unleashing_the_Potential_of_Spiking_Neural_Networks_with_Dynamic_Confidence_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nakano_Minimal_Solutions_to_Uncalibrated_Two-view_Geometry_with_Known_Epipoles_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_FBLNet_FeedBack_Loop_Network_for_Driver_Attention_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.02096", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Deep_Feature_Deblurring_Diffusion_for_Detecting_Out-of-Distribution_Objects_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/datasets-and-evaluation.json b/json_data/datasets-and-evaluation.json index 9be7a13..d8990b1 100644 --- a/json_data/datasets-and-evaluation.json +++ b/json_data/datasets-and-evaluation.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pintea_A_step_towards_understanding_why_classification_helps_regression_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10603", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_DNA-Rendering_A_Diverse_Neural_Actor_Repository_for_High-Fidelity_Human-Centric_Rendering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10173", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, 
+ "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kong_Robo3D_Towards_Robust_and_Reliable_3D_Perception_against_Corruptions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17597", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Barkan_Efficient_Discovery_and_Effective_Evaluation_of_Visual_Perceptual_Similarity_A_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14753", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_DetermiNet_A_Large-Scale_Diagnostic_Dataset_for_Complex_Visually-Grounded_Referencing_using_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03483", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Beyond_Object_Recognition_A_New_Benchmark_towards_Object_Concept_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.02710", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bakr_HRS-Bench_Holistic_Reliable_and_Scalable_Benchmark_for_Text-to-Image_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.05390", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shinoda_SegRCDB_Semantic_Segmentation_via_Formula-Driven_Supervised_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_LoTE-Animal_A_Long_Time-span_Dataset_for_Endangered_Animal_Behavior_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Building3D_A_Urban-Scale_Dataset_and_Benchmarks_for_Learning_Roof_Structures_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11914", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Lecture_Presentations_Multimodal_Dataset_Towards_Understanding_Multimodality_in_Educational_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.08080", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Park_Probabilistic_Precision_and_Recall_Towards_Reliable_Evaluation_of_Generative_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01590", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_EgoObjects_A_Large-Scale_Egocentric_Dataset_for_Fine-Grained_Object_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08816", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Peng_CAME_Contrastive_Automated_Model_Evaluation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11111", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Pan_Aria_Digital_Twin_A_New_Benchmark_Dataset_for_Egocentric_3D_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.06362", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Exploring_Video_Quality_Assessment_on_User_Generated_Contents_from_Aesthetic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.04894", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cascante-Bonilla_Going_Beyond_Nouns_With_Vision__Language_Models_Using_Synthetic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17590", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_H3WB_Human3.6M_3D_WholeBody_Dataset_and_Benchmark_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.15692", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Alibeigi_Zenseact_Open_Dataset_A_Large-Scale_and_Diverse_Multimodal_Dataset_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.02008", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Maninis_CAD-Estate_Large-scale_CAD_Model_Annotation_in_RGB_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.09011", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_Neglected_Free_Lunch_-_Learning_Image_Classifiers_Using_Annotation_Byproducts_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2303.17595", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ong_Chaotic_World_A_Large_and_Challenging_Benchmark_for_Human_Behavior_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ding_MOSE_A_New_Dataset_for_Video_Object_Segmentation_in_Complex_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.01872", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Neuhaus_Spurious_Features_Everywhere_-_Large-Scale_Detection_of_Harmful_Spurious_Features_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.04871", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Saini_Chop__Learn_Recognizing_and_Generating_Object-State_Compositions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14339", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shao_Building_Bridge_Across_the_Time_Disruption_and_Restoration_of_Murals_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_HoloAssist_an_Egocentric_Human_Interaction_Dataset_for_Interactive_AI_Assistants_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.17024", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_SynBody_Synthetic_Dataset_with_Layered_Human_Models_for_3D_Human_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17368", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_OxfordTVG-HIC_Can_Machine_Make_Humorous_Captions_from_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11636", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zust_LaRS_A_Diverse_Panoptic_Maritime_Obstacle_Detection_Dataset_and_Benchmark_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09618", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Weng_Joint_Metrics_Matter_A_Better_Standard_for_Trajectory_Forecasting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.06292", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_LPFF_A_Portrait_Dataset_for_Face_Generators_Across_Large_Poses_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14407", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shapovalov_Replay_Multi-modal_Multi-view_Acted_Videos_for_Casual_Holography_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12067", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Human-centric_Scene_Understanding_for_3D_Large-scale_Scenarios_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14392", 
@@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nakamura_Pre-training_Vision_Transformers_with_Very_Limited_Synthesized_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14710", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gustafson_FACET_Fairness_in_Computer_Vision_Evaluation_Benchmark_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.00035", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_EmoSet_A_Large-scale_Visual_Emotion_Dataset_with_Rich_Attributes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07961", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_RenderIH_A_Large-Scale_Synthetic_Dataset_for_3D_Interacting_Hand_Pose_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.09301", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_TIFA_Accurate_and_Interpretable_Text-to-Image_Faithfulness_Evaluation_with_Question_Answering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11897", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sudhakar_Exploring_the_Sim2Real_Gap_Using_Digital_Twins_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_ClothesNet_An_Information-Rich_3D_Garment_Model_Repository_with_Simulated_Clothes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09987", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Video_State-Changing_Object_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_PlanarTrack_A_Large-scale_Challenging_Benchmark_for_Planar_Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.07625", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_AIDE_A_Vision-Driven_Multi-View_Multi-Modal_Multi-Tasking_Dataset_for_Assistive_Driving_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13933", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_Harvard_Glaucoma_Detection_and_Progression_A_Multimodal_Multitask_Dataset_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13411", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gong_ARNOLD_A_Benchmark_for_Language-Grounded_Task_Learning_with_Continuous_States_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.04321", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Khan_FishNet_A_Large-scale_Dataset_and_Benchmark_for_Fish_Recognition_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/An_Towards_Content-based_Pixel_Retrieval_in_Revisited_Oxford_and_Paris_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05438", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Deng_A_Large-scale_Study_of_Spatiotemporal_Representation_Learning_with_a_New_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13505", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fang_SQAD_Automatic_Smartphone_Camera_Quality_Assessment_and_Benchmarking_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Revisiting_Scene_Text_Recognition_A_Data_Perspective_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08723", @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hataya_Will_Large-scale_Generative_Models_Corrupt_Future_Datasets_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.08095", @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_360VOT_A_New_Benchmark_Dataset_for_Omnidirectional_Visual_Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2307.14630", diff --git a/json_data/deep-learning-architectures-and-techniques.json b/json_data/deep-learning-architectures-and-techniques.json index 3e20d80..a9f453a 100644 --- a/json_data/deep-learning-architectures-and-techniques.json +++ b/json_data/deep-learning-architectures-and-techniques.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Aich_Efficient_Controllable_Multi-Task_Architectures_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11744", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_ParCNetV2_Oversized_Kernel_with_Enhanced_Attention_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.07157", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Unleashing_the_Power_of_Gradient_Signal-to-Noise_Ratio_for_Zero-Shot_NAS_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_MMST-ViT_Climate_Change-aware_Crop_Yield_Prediction_via_Multi-Modal_Spatial-Temporal_Vision_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Vasu_FastViT_A_Fast_Hybrid_Vision_Transformer_Using_Structural_Reparameterization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14189", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_IIEU_Rethinking_Neural_Feature_Activation_from_Decision-Making_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hyeon-Woo_Scratching_Visual_Transformers_Back_with_Uniform_Attention_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.08457", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_SpaceEvo_Hardware-Friendly_Search_Space_Design_for_Efficient_INT8_Inference_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08308", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_ElasticViT_Conflict-aware_Supernet_Training_for_Deploying_Fast_Vision_Transformer_on_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09730", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ryu_Gramian_Attention_Heads_are_Strong_yet_Efficient_Vision_Learners_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.16483", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_EfficientTrain_Exploring_Generalized_Curriculum_Learning_for_Training_Visual_Backbones_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.09703", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Ord2Seq_Regarding_Ordinal_Regression_as_Label_Sequence_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09004", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bai_Unified_Data-Free_Compression_Pruning_and_Quantization_without_Fine-Tuning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07209", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_LaPE_Layer-adaptive_Position_Embedding_for_Vision_Transformers_with_Independent_Layer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.05262", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Roy_Exemplar-Free_Continual_Transformer_with_Convolutions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11357", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Building_Vision_Transformers_with_Hierarchy_Aware_Feature_Aggregation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_ShiftNAS_Improving_One-shot_NAS_via_Probability_Shift_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08300", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Athwale_DarSwin_Distortion_Aware_Radial_Swin_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.09691", @@ -460,6 
+478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_ROME_Robustifying_Memory-Efficient_NAS_via_Topology_Disentanglement_and_Gradient_Accumulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2011.11233", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_FDViT_Improve_the_Hierarchical_Architecture_of_Vision_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_FLatten_Transformer_Vision_Transformer_using_Focused_Linear_Attention_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.00442", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chu_MixPath_A_Unified_Approach_for_One-shot_Neural_Architecture_Search_ICCV_2023_paper.pdf", "paper_arxiv_id": "2001.05887", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_SSF_Accelerating_Training_of_Spiking_Neural_Networks_with_Stabilized_Spiking_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_Dynamic_Perceiver_for_Efficient_Visual_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.11248", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Ren_SG-Former_Self-guided_Transformer_with_Evolving_Token_Reallocation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12216", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_Scale-Aware_Modulation_Meet_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08579", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Learning_to_Upsample_by_Learning_to_Sample_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.15085", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Peng_GET_Group_Event_Transformer_for_Event-Based_Vision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.02642", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Adaptive_Frequency_Filters_As_Efficient_Global_Token_Mixers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14008", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Fcaformer_Forward_Cross_Attention_in_Hybrid_Vision_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.07198", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qi_Dynamic_Snake_Convolution_Based_on_Topological_Geometric_Constraints_for_Tubular_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08388", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, 
"zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Khoshsirat_Sentence_Attention_Blocks_for_Answer_Grounding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11593", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Vo_MST-compression_Compressing_and_Accelerating_Binary_Neural_Networks_with_Minimum_Spanning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13735", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yun_EGformer_Equirectangular_Geometry-biased_Transformer_for_360_Depth_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.07803", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yun_SPANet_Frequency-balancing_Token_Mixer_using_Spectral_Pooling_Aggregation_Modulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11568", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Song_ModelGiF_Gradient_Fields_for_Model_Functional_Distance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11013", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hakim_ClusT3_Information_Invariant_Test-Time_Training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.12345", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Cumulative_Spatial_Knowledge_Distillation_for_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08500", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Baek_Luminance-aware_Color_Transform_for_Multiple_Exposure_Correction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Meng_Towards_Memory-_and_Time-Efficient_Backpropagation_for_Training_Spiking_Neural_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.14311", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Michalkiewicz_Domain_Generalization_Guided_by_Gradient_Signal_to_Noise_Ratio_of_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.07361", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_DOT_A_Distillation-Oriented_Trainer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08436", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Extensible_and_Efficient_Proxy_for_Neural_Architecture_Search_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Singhal_Learning_to_Transform_for_Generalizable_Instance-wise_Invariance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16672", @@ 
-1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kirchmeyer_Convolutional_Networks_with_Oriented_1D_Kernels_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.15812", diff --git a/json_data/document-analysis-and-understanding.json b/json_data/document-analysis-and-understanding.json index 56d23e9..37788c8 100644 --- a/json_data/document-analysis-and-understanding.json +++ b/json_data/document-analysis-and-understanding.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_A_Benchmark_for_Chinese-English_Scene_Text_Image_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03262", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Da_Vision_Grid_Transformer_for_Document_Layout_Analysis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14978", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guan_Self-Supervised_Character-to-Character_Distillation_for_Text_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.00288", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_ICL-D3IE_In-Context_Learning_with_Diverse_Demonstrations_Updating_for_Document_Information_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05063", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_ESTextSpotter_Towards_Better_Scene_Text_Spotting_with_Explicit_Synergy_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10147", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pan_Few_Shot_Font_Generation_Via_Transferring_Similarity_Guided_Global_Style_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.00827", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_Attention_Where_It_Matters_Rethinking_Visual_Document_Understanding_with_Selective_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01131", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Van_Landeghem_Document_Understanding_Dataset_and_Evaluation_DUDE_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.08455", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_LISTER_Neighbor_Decoding_for_Length-Insensitive_Scene_Text_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12774", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Morin_MolGrapher_Graph-based_Visual_Recognition_of_Chemical_Structures_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12234", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_SCOB_Universal_Text_Understanding_via_Character-wise_Supervised_Contrastive_Learning_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.12382", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Foreground_and_Text-lines_Aware_Document_Image_Rectification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liao_DocTr_Document_Transformer_for_Structured_Information_Extraction_in_Documents_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07929", diff --git a/json_data/efficient-and-scalable-vision.json b/json_data/efficient-and-scalable-vision.json index 52e6e7f..3df499e 100644 --- a/json_data/efficient-and-scalable-vision.json +++ b/json_data/efficient-and-scalable-vision.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tao_AdaNIC_Towards_Practical_Neural_Image_Compression_via_Dynamic_Transform_Routing_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Rethinking_Vision_Transformers_for_MobileNet_Size_and_Speed_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.08059", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Peng_DELFlow_Dense_Efficient_Learning_of_Scene_Flow_for_Large-Scale_Point_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04383", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": 
null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dutson_Eventful_Transformers_Leveraging_Temporal_Redundancy_in_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13494", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yao_Inherent_Redundancy_in_Spiking_Neural_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08227", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yun_Achievement-Based_Training_Progress_Balancing_for_Multi-Task_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ding_Prune_Spatio-temporal_Tokens_by_Semantic-aware_Temporal_Accumulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04549", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Differentiable_Transportation_Pruning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08483", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ancilotto_XiNet_Efficient_Neural_Networks_for_tinyML_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Frumkin_Jumping_through_Local_Minima_Quantization_in_the_Loss_Landscape_of_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2308.10814", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Colbert_A2Q_Accumulator-Aware_Quantization_with_Guaranteed_Overflow_Avoidance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13504v1", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Workie-Talkie_Accelerating_Federated_Learning_by_Overlapping_Computing_and_Communications_via_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_DenseShift_Towards_Accurate_and_Efficient_Low-Bit_Power-of-Two_Quantization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.09708", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nooralinejad_PRANC_Pseudo_RAndom_Networks_for_Compacting_Deep_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.08464", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Faghri_Reinforce_Data_Multiply_Impact_Improved_Model_Accuracy_and_Robustness_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08983", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Heitzinger_A_Fast_Unified_System_for_3D_Object_Detection_and_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": 
null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Estimator_Meets_Equilibrium_Perspective_A_Rectified_Straight_Through_Estimator_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06689", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_I-ViT_Integer-only_Quantization_for_Efficient_Vision_Transformer_Inference_ICCV_2023_paper.pdf", "paper_arxiv_id": "2207.01405", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_EMQ_Evolving_Training-free_Proxies_for_Automated_Mixed_Precision_Quantization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10554", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cho_Local_or_Global_Selective_Knowledge_Assimilation_for_Federated_Learning_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08809", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sajedi_DataDAM_Efficient_Dataset_Distillation_with_Attention_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.00093", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dukler_SAFE_Machine_Unlearning_With_Shard_Graphs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.13169", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Abati_ResQ_Residual_Quantization_for_Video_Perception_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2308.09511", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shoouri_Efficient_Computation_Sharing_for_Multi-Task_Visual_Scene_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09663", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Karimian_Essential_Matrix_Estimation_using_Convex_Relaxations_in_Orthogonal_Space_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fu_TripLe_Revisiting_Pretrained_Model_Reuse_and_Progressive_Learning_for_Efficient_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_DiffRate__Differentiable_Compression_Rate_for_Efficient_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.17997", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Bridging_Cross-task_Protocol_Inconsistency_for_Distillation_in_Dense_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14286", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_From_Knowledge_Distillation_to_Self-Knowledge_Distillation_A_Unified_Approach_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13005", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + 
"kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Robert_Efficient_3D_Semantic_Segmentation_with_Superpoint_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.08045", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Dataset_Quantization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10524", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jie_Revisiting_the_Parameter_Efficiency_of_Adapters_from_the_Perspective_of_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16867", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_RepQ-ViT_Scale_Reparameterization_for_Post-Training_Quantization_of_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.08254", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_Semantically_Structured_Image_Compression_via_Irregular_Group-Based_Decoupling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.02586", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Park_SeiT_Storage-Efficient_Vision_Training_with_Tokens_Using_1_of_Pixel_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11114", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_SMMix_Self-Motivated_Image_Mixing_for_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.12977", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Multi-Label_Knowledge_Distillation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06453", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ren_UGC_Unified_GAN_Compression_for_Efficient_Image-to-Image_Translation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.09310", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Parger_MotionDeltaCNN_Sparse_CNN_Inference_of_Frame_Differences_in_Moving_Camera_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.09887", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_EfficientViT_Lightweight_Multi-Scale_Attention_for_High-Resolution_Dense_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2205.14756", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_DREAM_Efficient_Dataset_Distillation_by_Representative_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.14416", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_INSTA-BNN_Binary_Neural_Network_with_INSTAnce-aware_Threshold_ICCV_2023_paper.pdf", "paper_arxiv_id": "2204.07439", @@ 
-1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ni_Deep_Incubation_Training_Large_Models_by_Divide-and-Conquering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.04129", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_AdaMV-MoE_Adaptive_Multi-Task_Vision_Mixture-of-Experts_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Overcoming_Forgetting_Catastrophe_in_Quantization-Aware_Training_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xia_Window-Based_Early-Exit_Cascades_for_Uncertainty_Estimation_When_Deep_Ensembles_are_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08010", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Choi_ORC_Network_Group-based_Knowledge_Distillation_using_Online_Role_Change_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.01186", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_RMP-Loss_Regularizing_Membrane_Potential_Distribution_for_Spiking_Neural_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06787", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_Structural_Alignment_for_Network_Pruning_through_Partial_Regularization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Automated_Knowledge_Distillation_via_Monte_Carlo_Tree_Search_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shaker_SwiftFormer_Efficient_Additive_Attention_for_Transformer-based_Real-time_Mobile_Vision_Applications_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15446", @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shang_Causal-DFQ_Causality_Guided_Data-Free_Network_Quantization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.13682", @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Efficient_Joint_Optimization_of_Layer-Adaptive_Weight_Pruning_in_Deep_Neural_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10438", @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Automatic_Network_Pruning_via_Hilbert-Schmidt_Independence_Criterion_Lasso_under_Information_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_Distribution_Shift_Matters_for_Knowledge_Distillation_with_Webly_Collected_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11469", @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fang_FastRecon_Few-shot_Industrial_Anomaly_Detection_via_Fast_Feature_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_E2VPT_An_Effective_and_Efficient_Approach_for_Visual_Prompt_Tuning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13770", @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Bridging_Vision_and_Language_Encoders_Parameter-Efficient_Tuning_for_Referring_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11545", @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Girish_SHACIRA_Scalable_HAsh-grid_Compression_for_Implicit_Neural_Representations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.15848", @@ -1485,6 +1544,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Efficient_Deep_Space_Filling_Curve_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1510,6 +1570,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Q-Diffusion_Quantizing_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.04304", 
@@ -1535,6 +1596,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_Lossy_and_Lossless_L2_Post-training_Model_Size_Compression_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04269", @@ -1560,6 +1622,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Robustifying_Token_Attention_for_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11126", diff --git a/json_data/embodied-vision-active-agents-simulation.json b/json_data/embodied-vision-active-agents-simulation.json index 6695579..81d3f32 100644 --- a/json_data/embodied-vision-active-agents-simulation.json +++ b/json_data/embodied-vision-active-agents-simulation.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Skill_Transformer_A_Monolithic_Policy_for_Mobile_Manipulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09873", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kotar_ENTL_Embodied_Navigation_Trajectory_Learner_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02639", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_DREAMWALKER_Mental_Planning_for_Continuous_Vision-Language_Navigation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07498", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Singh_Scene_Graph_Contrastive_Learning_for_Embodied_Navigation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_Perpetual_Humanoid_Control_for_Real-time_Simulated_Avatars_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.06456", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Grounding_3D_Object_Affordance_from_2D_Interactions_in_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10437", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Krantz_Navigating_to_Objects_Specified_by_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01192", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhai_PEANUT_Predicting_and_Navigating_to_Unseen_Targets_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.02497", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Context-Aware_Planning_and_Environment-Aware_Memory_for_Instruction_Following_Embodied_Agents_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07241", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Learning_Foresightful_Dense_Visual_Affordance_for_Deformable_Object_Manipulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11057", @@ -260,6 +270,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cancelli_Exploiting_Proximity-Aware_Tasks_for_Embodied_Social_Navigation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.00767", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Birds-Eye-View_Scene_Graph_for_Vision-Language_Navigation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04758", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_Active_Neural_Mapping_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16246", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Omnidirectional_Information_Gathering_for_Knowledge_Transfer-Based_Audio-Visual_Navigation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10306", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Marza_Multi-Object_Navigation_with_Dynamically_Learned_Neural_Implicit_Representations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.05129", diff --git a/json_data/explainable-ai-for-cv.json b/json_data/explainable-ai-for-cv.json index 70453f1..e73a4ce 100644 --- a/json_data/explainable-ai-for-cv.json +++ b/json_data/explainable-ai-for-cv.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Balasubramanian_Towards_Improved_Input_Masking_for_Convolutional_Neural_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2211.14646", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/van_der_Klis_PDiscoNet_Semantically_consistent_part_discovery_for_fine-grained_recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Srivastava_Corrupting_Neuron_Explanations_of_Deep_Visual_Features_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.16332", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Rymarczyk_ICICLE_Interpretable_Class_Incremental_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.07811", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Upadhyay_ProbVLM_Probabilistic_Adapter_for_Frozen_Vison-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.00398", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hornauer_Out-of-Distribution_Detection_for_Monocular_Depth_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06072", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Rao_Studying_How_to_Efficiently_and_Effectively_Guide_Models_with_Explanations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11932", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Dravid_Rosetta_Neurons_Mining_the_Common_Units_in_a_Model_Zoo_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.09346", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/van_Noord_Protoype-based_Dataset_Comparison_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02401", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Learning_to_Identify_Critical_States_for_Reinforcement_Learning_from_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07795", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Stergiou_Leaping_Into_Memories_Space-Time_Deep_Feature_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09941", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_MAGI_Multi-Annotated_Explanation-Guided_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_SAFARI_Versatile_and_Efficient_Evaluations_for_Robustness_of_Interpretability_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.09418", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Do_DALL-E_and_Flamingo_Understand_Each_Other_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.12249", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, 
"zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Evaluation_and_Improvement_of_Interpretability_for_Self-Explainable_Part-Prototype_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.05946", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_MoreauGrad_Sparse_and_Robust_Interpretation_of_Neural_Networks_via_Moreau_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.05294", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yao_Towards_Understanding_the_Generalization_of_Deepfake_Detectors_from_a_Game-Theoretical_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Counterfactual-based_Saliency_Map_Towards_Visual_Contrastive_Explanations_for_Neural_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jeon_Beyond_Single_Path_Integrated_Gradients_for_Reliable_Input_Attribution_via_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Learning_Support_and_Trivial_Prototypes_for_Interpretable_Image_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.04011", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Barkan_Visual_Explanations_via_Iterated_Integrated_Attributions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.18585", diff --git a/json_data/faces-and-gestures.json b/json_data/faces-and-gestures.json index 30ae585..ca10411 100644 --- a/json_data/faces-and-gestures.json +++ b/json_data/faces-and-gestures.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nakamura_DeePoint_Visual_Pointing_Recognition_and_Direction_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06977", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Contactless_Pulse_Estimation_Leveraging_Pseudo_Labels_and_Self-Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_Most_Important_Person-Guided_Dual-Branch_Cross-Patch_Attention_for_Group_Affect_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.07055", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_ContactGen_Generative_Contact_Modeling_for_Grasp_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.03740", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Thambiraja_Imitator_Personalized_Speech-driven_3D_Facial_Animation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.00023", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_DVGaze_Dual-View_Gaze_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10310", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dan_TransFace_Calibrating_Transformer_Training_for_Face_Recognition_from_a_Data-Centric_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10133", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Towards_Unsupervised_Domain_Generalization_for_Face_Anti-Spoofing_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ren_Reinforced_Disentanglement_for_Face_Swapping_without_Skip_Connection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07928", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiao_CoSign_Exploring_Co-occurrence_Signals_in_Skeleton-based_Continuous_Sign_Language_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Peng_EmoTalk_Speech-Driven_Emotional_Disentanglement_for_3D_Face_Animation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11089", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_LA-Net_Landmark-Aware_Learning_for_Reliable_Facial_Expression_Recognition_under_Label_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09023", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_ASM_Adaptive_Skinning_Model_for_High-Quality_3D_Face_Modeling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.09423", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ou_Troubleshooting_Ethnic_Quality_Bias_with_Curriculum_Domain_Adaptation_for_Face_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_UniFace_Unified_Cross-Entropy_Loss_for_Deep_Face_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Human_Part-wise_3D_Motion_Context_Learning_for_Sign_Language_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09305", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Weakly-Supervised_Text-Driven_Contrastive_Learning_for_Facial_Behavior_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.00058", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_HaMuCo_Hand_Pose_Estimation_via_Multiview_Collaborative_Self-Supervised_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.00988", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_ReactioNet_Learning_High-Order_Facial_Behavior_from_Universal_Stimulus-Reaction_by_Dyadic_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shen_CLIP-Cluster_CLIP-Guided_Attribute_Hallucination_for_Face_Clustering_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Learning_Human_Dynamics_in_Autonomous_Driving_Scenarios_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhi_LivelySpeaker_Towards_Semantic-Aware_Co-Speech_Gesture_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.09294", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Controllable_Guide-Space_for_Generalizable_Face_Forgery_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14039", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_Unpaired_Multi-domain_Attribute_Translation_of_3D_Facial_Shapes_with_a_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2308.13245", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Song_Emotional_Listener_Portrait_Neural_Listener_Head_Generation_with_Emotion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.00068", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nair_Steered_Diffusion_A_Generalized_Framework_for_Plug-and-Play_Conditional_Image_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.00224", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Invariant_Feature_Regularization_for_Fair_Face_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.14652", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Gloss-Free_Sign_Language_Translation_Improving_from_Visual-Language_Pretraining_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14768", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Contrastive_Pseudo_Learning_for_Open-World_DeepFake_Attribution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11132", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ahuja_Continual_Learning_for_Personalized_Co-speech_Gesture_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_HandR2N2_Iterative_3D_Hand_Pose_Estimation_Using_a_Residual_Recurrent_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gururani_SPACE_Speech-driven_Portrait_Animation_with_Controllable_Expression_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.09809", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sevastopolskiy_How_to_Boost_Face_Recognition_with_StyleGAN_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.10090", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": "https://zenodo.org/record/8252535", + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tafasca_ChildPlay_A_New_Benchmark_for_Understanding_Childrens_Gaze_Behaviour_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.01630", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Oorloff_Robust_One-Shot_Face_Video_Re-enactment_using_Hybrid_Latent_Spaces_of_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.07848", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Aich_Data-Free_Class-Incremental_Hand_Gesture_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Learning_Robust_Representations_with_Information_Bottleneck_and_Memory_Network_for_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Knowledge-Spreader_Learning_Semi-Supervised_Facial_Action_Dynamics_by_Consistifying_Knowledge_Granularity_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Face_Clustering_via_Graph_Convolutional_Networks_with_Confidence_Edges_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_StyleGANEX_StyleGAN-Based_Manipulation_Beyond_Cropped_Aligned_Faces_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06146", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Larue_SeeABLE_Soft_Discrepancies_and_Bounded_Contrastive_Learning_for_Exposing_Deepfakes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.11296", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Adaptive_Nonlinear_Latent_Transformation_for_Conditional_Face_Editing_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07790", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Semi-supervised_Speech-driven_3D_Facial_Animation_via_Cross-modal_Encoding_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_ICD-Face_Intra-class_Compactness_Distillation_for_Face_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_C2ST_Cross-Modal_Contextualized_Sequence_Transduction_for_Continuous_Sign_Language_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/fairness-privacy-ethics-social-good-transparency-accountability-in-vision.json b/json_data/fairness-privacy-ethics-social-good-transparency-accountability-in-vision.json index 631df09..4cfb3b6 100644 --- a/json_data/fairness-privacy-ethics-social-good-transparency-accountability-in-vision.json +++ b/json_data/fairness-privacy-ethics-social-good-transparency-accountability-in-vision.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wan_Enhancing_Privacy_Preservation_in_Federated_Learning_via_Learning_Rate_Perturbation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_TARGET_Federated_Class-Continual_Learning_via_Exemplar-Free_Distillation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06937", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yenamandra_FACTS_First_Amplify_Correlations_and_Then_Slice_to_Discover_Bias_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.17430", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Computation_and_Data_Efficient_Backdoor_Attacks_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zeng_Global_Balanced_Experts_for_Federated_Long-Tailed_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Peng_Source-free_Domain_Adaptive_Human_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03202", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Meister_Gender_Artifacts_in_Visual_Datasets_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.09191", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_FRAug_Tackling_Federated_Learning_with_Non-IID_Features_via_Representation_Augmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2205.14900", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ghodsi_zPROBE_Zero_Peek_Robustness_Checks_for_Federated_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.12100", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, 
"zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ko_Practical_Membership_Inference_Attacks_Against_Large-Scale_Multi-Modal_Models_A_Pilot_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.00108", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_FedPD_Federated_Open_Set_Recognition_with_Parameter_Disentanglement_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_MUter_Machine_Unlearning_on_Adversarially_Trained_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Thong_Beyond_Skin_Tone_A_Multidimensional_Measure_of_Apparent_Skin_Color_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05148", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Brinkmann_A_Multidimensional_Analysis_of_Social_Biases_in_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.01948", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Partition-And-Debias_Agnostic_Biases_Mitigation_via_a_Mixture_of_Biases-Specific_Experts_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10005", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Rethinking_Data_Distillation_Do_Not_Overlook_Calibration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12463", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nahon_Mining_bias-target_Alignment_from_Voronoi_Cells_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.03691", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chiu_Better_May_Not_Be_Fairer_A_Study_on_Subgroup_Discrepancy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.08649", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fang_GIFD_A_Generative_Gradient_Inversion_Method_with_Feature_Domain_Optimization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04699", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_Benchmarking_Algorithmic_Bias_in_Face_Recognition_An_Experimental_Approach_Using_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05441", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_FedPerfix_Towards_Partial_Model_Personalization_of_Vision_Transformers_in_Federated_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09160", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_Towards_Attack-tolerant_Federated_Learning_via_Critical_Parameter_Analysis_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2308.09318", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_What_can_Discriminator_do_Towards_Box-free_Ownership_Verification_of_Generative_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15860", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fang_Robust_Heterogeneous_Federated_Learning_under_Data_Corruption_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Communication-efficient_Federated_Learning_with_Single-Step_Synthetic_Features_Compressor_for_Faster_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.13562", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_GPFL_Simultaneously_Learning_Global_and_Personalized_Feature_Information_for_Personalized_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10279", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zeng_MPCViT_Searching_for_Accurate_and_Efficient_MPC-Friendly_Vision_Transformer_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.13955", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Metzen_Identification_of_Systematic_Errors_of_Image_Classifiers_on_Rare_Subgroups_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05072", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, 
"zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shvai_Adaptive_Image_Anonymization_in_the_Context_of_Image_Classification_with_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Vahidian_When_Do_Curricula_Work_in_Federated_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.12712", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Domain_Specified_Optimization_for_Deployment_Authorization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_STPrivacy_Spatio-Temporal_Privacy-Preserving_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.03046", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_SAL-ViT_Towards_Latency_Efficient_Private_Inference_on_ViT_using_Selective_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Generative_Gradient_Inversion_via_Over-Parameterized_Networks_in_Federated_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Basu_Inspecting_the_Geographical_Representativeness_of_Images_from_Text-to-Image_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.11080", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wen_Divide_and_Conquer_a_Two-Step_Method_for_High_Quality_Face_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Exploring_the_Benefits_of_Visual_Prompting_in_Differential_Privacy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12247", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Towards_Fairness-aware_Adversarial_Network_Pruning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Peng_AutoReP_Automatic_ReLU_Replacement_for_Fast_Private_Network_Inference_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10134", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Flatness-Aware_Minimization_for_Domain_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11108", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Communication-Efficient_Vertical_Federated_Learning_with_Limited_Overlapping_Samples_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16270", 
diff --git a/json_data/first-person-egocentric-vision.json b/json_data/first-person-egocentric-vision.json index 5601829..024a62f 100644 --- a/json_data/first-person-egocentric-vision.json +++ b/json_data/first-person-egocentric-vision.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Radevski_Multimodal_Distillation_for_Egocentric_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07483", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Akiva_Self-Supervised_Object_Detection_from_Egocentric_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mur-Labadia_Multi-label_Affordance_Mapping_from_Egocentric_Vision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02120", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Ego-Only_Egocentric_Action_Detection_without_Exocentric_Transferring_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.01380", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pan_COPILOT_Human-Environment_Collision_Prediction_and_Localization_from_Egocentric_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.01781", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_EgoPCA_A_New_Framework_for_Egocentric_Hand-Object_Interaction_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02423", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pramanick_EgoVLPv2_Egocentric_Video-Language_Pre-training_with_Fusion_in_the_Backbone_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.05463", diff --git a/json_data/generative-ai.json b/json_data/generative-ai.json index 3714cb2..cb18635 100644 --- a/json_data/generative-ai.json +++ b/json_data/generative-ai.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Vinker_CLIPascene_Scene_Sketching_with_Different_Types_and_Levels_of_Abstraction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.17256", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/PNVR_LD-ZNet_A_Latent_Diffusion_Approach_for_Text-Based_Image_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12343", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_TexFusion_Synthesizing_3D_Textures_with_Text-Guided_Image_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.13772", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_NeuRBF_A_Neural_Fields_Representation_with_Adaptive_Radial_Basis_Functions_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, 
"demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Peebles_Scalable_Diffusion_Models_with_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.09748", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Texture_Generation_on_3D_Meshes_with_Point-UV_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10490", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chan_Generative_Novel_View_Synthesis_with_3D-Aware_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02602", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_DiffFit_Unlocking_Transferability_of_Large_Diffusion_Models_via_Simple_Parameter-efficient_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06648", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sargent_VQ3D_Learning_a_3D-Aware_Generative_Model_on_ImageNet_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.06833", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ge_Ref-NeuS_Ambiguity-Reduced_Neural_Implicit_Surface_Learning_for_Multi-View_Reconstruction_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10840", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Pandey_A_Complete_Recipe_for_Diffusion_Generative_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.01748", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhong_MMVP_Motion-Matrix-Based_Video_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16154", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Cross-Ray_Neural_Radiance_Fields_for_Novel-View_Synthesis_from_Unconstrained_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08093", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pan_Effective_Real_Image_Editing_with_Accelerated_Iterative_Diffusion_Inversion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04907", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_Simulating_Fluids_in_Real-World_Still_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2204.11335", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/QI_FateZero_Fusing_Attentions_for_Zero-shot_Text-based_Video_Editing_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09535", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/ELITE-library/ELITE", "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_ELITE_Encoding_Visual_Concepts_into_Textual_Embeddings_for_Customized_Text-to-Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.13848", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/PAIR/Text2Video-Zero", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Khachatryan_Text2Video-Zero_Text-to-Image_Diffusion_Models_are_Zero-Shot_Video_Generators_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13439", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Chupa_Carving_3D_Clothed_Humans_from_Skinned_Shape_Priors_using_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.11870", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Holmquist_DiffPose_Multi-hypothesis_Human_Pose_Estimation_using_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.16487", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ju_HumanSD_A_Native_Skeleton-Guided_Diffusion_Model_for_Human_Image_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.04269", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tanaka_Role-Aware_Interaction_Generation_from_Textual_Description_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yuan_PhysDiff_Physics-Guided_Human_Motion_Diffusion_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.02500", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Forward_Flow_for_Novel_View_Synthesis_of_Dynamic_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.17390", diff --git a/json_data/geometric-deep-learning.json b/json_data/geometric-deep-learning.json index f943e9f..ba0dc27 100644 --- a/json_data/geometric-deep-learning.json +++ b/json_data/geometric-deep-learning.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Get_the_Best_of_Both_Worlds_Improving_Accuracy_and_Transferability_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.01547", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_4D_Panoptic_Segmentation_as_Invariant_and_Equivariant_Field_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15651", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gleize_SiLK_Simple_Learned_Keypoints_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06194", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zohaib_SC3K_Self-supervised_and_Coherent_3D_Keypoints_Estimation_from_Rotated_Noisy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05410", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Min_Geometric_Viewpoint_Learning_with_Hyper-Rays_and_Harmonics_Encoding_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Surface_Extraction_from_Neural_Unsigned_Distance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08878", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Saha_Learning_Adaptive_Neighborhoods_for_Graph_Neural_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09065", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Why_do_networks_have_inhibitorynegative_connections_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.03211", diff --git a/json_data/human-in-the-loop-computer-vision.json b/json_data/human-in-the-loop-computer-vision.json index 4cde975..33ba096 100644 --- a/json_data/human-in-the-loop-computer-vision.json +++ b/json_data/human-in-the-loop-computer-vision.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_Knowledge-Aware_Federated_Active_Learning_with_Non-IID_Data_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.13579", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_SimpleClick_Interactive_Image_Segmentation_with_Simple_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.11006", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, 
"demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_InterFormer_Real-time_Interactive_Image_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02942", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Interactive_Class-Agnostic_Object_Counting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05277", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Stretcu_Agile_Modeling_From_Concept_to_Classifier_in_Minutes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.12948", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kye_TiDAL_Learning_Training_Dynamics_for_Active_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.06788", diff --git a/json_data/human-poseshape-estimation.json b/json_data/human-poseshape-estimation.json index e5f858d..a7c87ff 100644 --- a/json_data/human-poseshape-estimation.json +++ b/json_data/human-poseshape-estimation.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kaufmann_EMDB_The_Electromagnetic_Database_of_Global_3D_Human_Pose_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16894", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_ReFit_Recurrent_Fitting_Network_for_3D_Human_Recovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11184", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chai_Global_Adaptation_Meets_Local_Generalization_Unsupervised_Domain_Adaptation_for_3D_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16456", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tse_Spectral_Graphormer_Spectral_Graph-Based_Transformer_for_Egocentric_Two-Hand_Reconstruction_using_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11015", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Realistic_Full-Body_Tracking_from_Sparse_Observations_via_Joint-Level_Modeling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08855", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Rethinking_Pose_Estimation_in_Crowds_Overcoming_the_Detection_Information_Bottleneck_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.07879", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xing_HDG-ODE_A_Hierarchical_Continuous-Time_Model_for_Human_Pose_Forecasting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jian_AffordPose_A_Large-Scale_Dataset_of_Hand-Object_Interactions_with_Affordance-Driven_Hand_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08942", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_PhaseMP_Robust_3D_Pose_Estimation_via_Phase-conditioned_Human_Motion_Prior_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Synthesizing_Diverse_Human_Motions_in_3D_Indoor_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.12411", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Choudhury_TEMPO_Efficient_Multi-View_Pose_Estimation_Tracking_and_Forecasting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.07910", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shan_Diffusion-Based_3D_Human_Pose_Estimation_with_Multi-Hypothesis_Aggregation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11579", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Park_Towards_Robust_and_Smooth_3D_Multi-Person_Pose_Estimation_from_Monocular_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08644", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/brjathu/HMR2.0", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Goel_Humans_in_4D_Reconstructing_and_Tracking_Humans_with_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.20091", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Su_NPC_Neural_Point_Characters_from_Video_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2304.02013", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kong_Priority-Centric_Human_Motion_Generation_in_Discrete_Latent_Space_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14480", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_NCHO_Unsupervised_Learning_for_Neural_3D_Composition_of_Humans_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.14345", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nam_Cyclic_Test-Time_Adaptation_on_Monocular_Video_for_3D_Human_Mesh_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06554", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_MHEntropy_Entropy_Meets_Multiple_Hypotheses_for_Pose_and_Shape_Recovery_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Probabilistic_Triangulation_for_Uncalibrated_Multi-View_3D_Human_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04756", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_DiffPose_SpatioTemporal_Diffusion_Model_for_Video-Based_Human_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16687", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Reconstructing_Groups_of_People_with_Hypergraph_Relational_Reasoning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.15844", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_MixSynthFormer_A_Transformer_Encoder-like_Structure_with_Mixed_Synthetic_Self-attention_for_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Leng_Dynamic_Hyperbolic_Attention_Network_for_Fine_Hand-object_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02965", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Human_from_Blur_Human_Pose_Tracking_from_Blurry_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17209", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_AG3D_Learning_to_Generate_3D_Avatars_from_2D_Image_Collections_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.02312", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_InterDiff_Generating_3D_Human-Object_Interactions_with_Physics-Informed_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16905", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_SEFD_Learning_to_Distill_Complex_Pose_and_Occlusion_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_3D_Human_Mesh_Recovery_with_Sequentially_Global_Rotation_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/You_Co-Evolution_of_Pose_and_Mesh_for_3D_Human_Body_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10305", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_PHRIT_Parametric_Hand_Representation_with_Implicit_Template_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14916", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhai_HopFIR_Hop-wise_GraphFormer_with_Intragroup_Joint_Refinement_for_3D_Human_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.14581", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Raychaudhuri_Prior-guided_Source-free_Domain_Adaptation_for_Human_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13954", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dai_Cloth2Body_Generating_3D_Human_Body_Mesh_from_2D_Clothing_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16189", @@ -860,6 +894,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Delmas_PoseFix_Correcting_3D_Human_Poses_with_Natural_Language_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08480", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Group_Pose_A_Simple_Baseline_for_End-to-End_Multi-Person_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07313", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Azadi_Make-An-Animation_Large-Scale_Text-conditional_3D_Human_Motion_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.09662", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xue_NSF_Neural_Surface_Fields_for_Human_Modeling_from_Monocular_Depth_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14847", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pi_Hierarchical_Generation_of_Human-Object_Interactions_with_Diffusion_Probabilistic_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jang_Dynamic_Mesh_Recovery_from_Partial_Point_Cloud_Sequence_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_MotionBERT_A_Unified_Perspective_on_Learning_Human_Motion_Representations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.06551", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qu_Novel-View_Synthesis_and_Pose_Estimation_for_Hand-Object_Interaction_from_Sparse_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11198", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_OCHID-Fi_Occlusion-Robust_Hand_Pose_Estimation_in_3D_via_RF-Vision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10146", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Neural_Interactive_Keypoint_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10174", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bramlage_Plausible_Uncertainties_for_Human_Pose_Regression_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dou_TORE_Token_Reduction_for_Efficient_Human_Mesh_Recovery_with_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.10705", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Weakly-supervised_3D_Pose_Transfer_with_Keypoints_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13459", diff --git 
a/json_data/humans-3d-modeling-and-driving.json b/json_data/humans-3d-modeling-and-driving.json index 771e401..c5357ef 100644 --- a/json_data/humans-3d-modeling-and-driving.json +++ b/json_data/humans-3d-modeling-and-driving.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hollein_Text2Room_Extracting_Textured_3D_Meshes_from_2D_Text-to-Image_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11989", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Stier_LivePose_Online_3D_Reconstruction_from_Monocular_Video_with_Dynamic_Camera_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.00054", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shao_NDDepth_Normal-Distance_Assisted_Monocular_Depth_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.10592", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_LATR_3D_Lane_Detection_from_Monocular_Images_with_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04583", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jia_DriveAdapter_Breaking_the_Coupling_Barrier_of_Perception_and_Planning_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.00398", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Prokudin_Dynamic_Point_Fields_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2304.02626", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_Generalizing_Neural_Human_Fitting_to_Unseen_Poses_With_Articulated_SE3_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.10528", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Probabilistic_Human_Mesh_Recovery_in_3D_Scenes_from_Egocentric_Views_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06024", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tripathi_DECO_Dense_Estimation_of_3D_Human-Scene_Contact_In_The_Wild_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ren_Decoupled_Iterative_Refinement_Framework_for_Interacting_Hands_Reconstruction_from_a_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.02410", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Heinrich_Chasing_Clouds_Differentiable_Volumetric_Rasterisation_of_Point_Clouds_as_a_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_Rehearsal-Free_Domain_Continual_Face_Anti-Spoofing_Generalize_More_and_Forget_Less_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09914", diff --git a/json_data/image-and-video-forensics.json b/json_data/image-and-video-forensics.json index 
204f45f..7fb26a5 100644 --- a/json_data/image-and-video-forensics.json +++ b/json_data/image-and-video-forensics.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Pre-Training-Free_Image_Manipulation_Localization_through_Non-Mutually_Exclusive_Contrastive_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14900", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Black_VADER_Video_Alignment_Differencing_and_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13193", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Deng_PIRNet_Privacy-Preserving_Image_Restoration_Network_via_Wavelet_Lifting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Le_Quality-Agnostic_Deepfake_Detection_with_Intra-model_Collaborative_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05911", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhai_Towards_Generic_Image_Manipulation_Detection_with_Weakly-Supervised_Self-Consistency_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01246", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_UCF_Uncovering_Common_Features_for_Generalizable_Deepfake_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.13949", @@ -160,6 
+166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_SAFL-Net_Semantic-Agnostic_Feature_Learning_Network_with_Auxiliary_Plugins_for_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_DRAW_Defending_Camera-shooted_RAW_Against_Image_Manipulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16418", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_DIRE_for_Diffusion-Generated_Image_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09295", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ji_Uncertainty-guided_Learning_for_Improving_Image_Manipulation_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fernandez_The_Stable_Signature_Rooting_Watermarks_in_Latent_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15435", diff --git a/json_data/image-and-video-synthesis.json b/json_data/image-and-video-synthesis.json index 1967e53..dc4edc6 100644 --- a/json_data/image-and-video-synthesis.json +++ b/json_data/image-and-video-synthesis.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Text-Driven_Generative_Domain_Adaptation_with_Spectral_Consistency_Regularization_ICCV_2023_paper.pdf", 
"paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Silver_MosaiQ_Quantum_Generative_Adversarial_Networks_for_Image_Generation_on_NISQ_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11096", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_Controllable_Visual-Tactile_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.03051", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Orgad_Editing_Implicit_Assumptions_in_Text-to-Image_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08084", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Svitov_DINAR_Diffusion_Inpainting_of_Neural_Textures_for_One-Shot_Human_Avatars_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09375", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sushko_Smoothness_Similarity_Regularization_for_Few-Shot_GAN_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09717", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_HSR-Diff_Hyperspectral_Image_Super-Resolution_via_Conditional_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.12085", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Long-Term_Photometric_Consistent_Novel_View_Synthesis_with_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.10700", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_AutoDiffusion_Training-Free_Optimization_of_Time_Steps_and_Architectures_for_Automated_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.10438", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Collecting_The_Puzzle_Pieces_Disentangled_Self-Driven_Human_Pose_Transfer_by_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.01887", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Naveh_Multi-Directional_Subspace_Editing_in_Style-Space_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.11825", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bounareli_HyperReenact_One-Shot_Reenactment_via_Jointly_Learning_to_Refine_and_Retarget_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10797", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Generating_Realistic_Images_from_In-the-wild_Sounds_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02405", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bahmani_CC3D_Layout-Conditioned_Generation_of_Compositional_3D_Scenes_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2303.12074", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jain_UMFuse_Unified_Multi_View_Fusion_for_Human_Editing_Applications_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.10157", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Evaluating_Data_Attribution_for_Text-to-Image_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.09345", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Neural_Characteristic_Function_Learning_for_Conditional_Image_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_WaveIPT_Joint_Attention_and_Flow_Alignment_in_the_Wavelet_domain_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_LayoutDiffusion_Improving_Graphic_Layout_Generation_by_Discrete_Diffusion_Probabilistic_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11589", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_Human-Inspired_Facial_Sketch_Synthesis_with_Dynamic_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.00216", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Ozkan_Conceptual_and_Hierarchical_Latent_Space_Decomposition_for_Face_Editing_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jeon_Improving_Diversity_in_Zero-Shot_GAN_Adaptation_with_Semantic_Variations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10554", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shin_BallGAN_3D-aware_Image_Synthesis_with_a_Spherical_Background_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.09091", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wallace_End-to-End_Diffusion_Latent_Optimization_Improves_Classifier_Guidance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13703", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Siyao_Deep_Geometrized_Cartoon_Line_Inbetweening_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16643", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fu_UnitedHuman_Harnessing_Multi-Source_Data_for_High-Resolution_Human_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14335", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Towards_Authentic_Face_Restoration_with_Iterative_Diffusion_Models_and_Beyond_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2307.08996", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_SVDiff_Compact_Parameter_Space_for_Diffusion_Fine-Tuning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11305", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sargsyan_MI-GAN_A_Simple_Baseline_for_Image_Inpainting_on_Mobile_Devices_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Esser_Structure_and_Content-Guided_Video_Synthesis_with_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.03011", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/YuxinJ/Scenimefy", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Scenimefy_Learning_to_Craft_Anime_Scene_via_Semi-Supervised_Image-to-Image_Translation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12968", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_Efficient-VQGAN_Towards_High-Resolution_Image_Generation_with_Efficient_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.05400", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_A_Latent_Space_of_Stochastic_Diffusion_Models_for_Zero-Shot_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, 
"demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kumar_Generative_Multiplane_Neural_Radiance_for_3D-Aware_Image_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01172", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nie_Parallax-Tolerant_Unsupervised_Deep_Image_Stitching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.08207", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_GAIT_Generating_Aesthetic_Indoor_Tours_with_Deep_Reinforcement_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dastjerdi_EverLight_Indoor-Outdoor_Editable_HDR_Lighting_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.13207", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Prompt_Tuning_Inversion_for_Text-driven_Image_Editing_Using_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.04441", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hang_Efficient_Diffusion_Training_via_Min-SNR_Weighting_Strategy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09556", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_BoxDiff_Text-to-Image_Synthesis_with_Training-Free_Box-Constrained_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10816", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hong_Improving_Sample_Quality_of_Diffusion_Models_Using_Self-Attention_Guidance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.00939", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Not_All_Steps_are_Created_Equal_Selective_Diffusion_Distillation_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08448", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Niu_Deep_Image_Harmonization_with_Learnable_Augmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.00376", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Out-of-Domain_GAN_Inversion_via_Invertibility_Decomposition_for_Photo-Realistic_Human_Face_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.09262", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Bidirectionally_Deformable_Motion_Modulation_For_Video-based_Human_Pose_Transfer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07754", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Size_Does_Matter_Size-aware_Virtual_Try-on_via_Clothing-oriented_Transformation_Try-on_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ali_VidStyleODE_Disentangled_Video_Editing_via_StyleGAN_and_NeuralODEs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06020", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shen_Learning_Global-aware_Kernel_for_Image_Harmonization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.11676", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/songweig/rich-text-to-image", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ge_Expressive_Text-to-Image_Generation_with_Rich_Text_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06720", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_A_Large-Scale_Outdoor_Multi-Modal_Dataset_and_Benchmark_for_Novel_View_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.06782", @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Efficient_Region-Aware_Neural_Radiance_Fields_for_High-Fidelity_Talking_Portrait_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09323", @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Perceptual_Artifacts_Localization_for_Image_Synthesis_Tasks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.05590", @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Park_Learning_to_Generate_Semantic_Layouts_for_Higher_Text-Image_Correspondence_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08157", @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_StylerDALLE_Language-Guided_Style_Transfer_Using_a_Vector-Quantized_Tokenizer_of_a_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09268", @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chung_Shortcut-V2V_Compression_Framework_for_Video-to-Video_Translation_Based_on_Temporal_Redundancy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08011", @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/Tune-A-Video-library/Tune-A-Video-Training-UI", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Tune-A-Video_One-Shot_Tuning_of_Image_Diffusion_Models_for_Text-to-Video_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.11565", @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shiohara_BlendFace_Re-designing_Identity_Encoders_for_Face-Swapping_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10854", @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Talking_Head_Generation_with_Probabilistic_Audio-to-Visual_Diffusion_Priors_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.04248", @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_LinkGAN_Linking_GAN_Latents_to_Pixels_for_Controllable_Image_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.04604", @@ -1485,6 +1544,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Open-vocabulary_Object_Segmentation_with_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.05221", @@ -1510,6 +1570,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_StyleDiffusion_Controllable_Disentangled_Style_Transfer_via_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07863", @@ -1535,6 +1596,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gong_ToonTalker_Cross-Domain_Face_Reenactment_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12866", @@ -1560,6 +1622,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Dense_Text-to-Image_Generation_with_Attention_Modulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12964", @@ -1585,6 +1648,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Song_Householder_Projector_for_Unsupervised_Latent_Semantics_Discovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08012", @@ 
-1610,6 +1674,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Niu_Deep_Image_Harmonization_with_Globally_Guided_Feature_Transformation_and_Relation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.00356", @@ -1635,6 +1700,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_One-Shot_Generative_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2111.09876", @@ -1660,6 +1726,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chan_Hashing_Neural_Video_Decomposition_with_Multiplicative_Residuals_in_Space-Time_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14022", @@ -1685,6 +1752,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/shi-labs/Versatile-Diffusion", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Versatile_Diffusion_Text_Images_and_Variations_All_in_One_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.08332", @@ -1710,6 +1778,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Harnessing_the_Spatial-Temporal_Attention_of_Diffusion_Models_for_High-Fidelity_Text-to-Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.03869", @@ -1735,6 +1804,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_FreeDoM_Training-Free_Energy-Guided_Conditional_Diffusion_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09833", @@ -1760,6 +1830,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": 
null, "demo_page": "https://huggingface.co/spaces/TencentARC/MasaCtrl", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_MasaCtrl_Tuning-Free_Mutual_Self-Attention_Control_for_Consistent_Image_Synthesis_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.08465", @@ -1785,6 +1856,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Personalized_Image_Generation_for_Color_Vision_Deficiency_Population_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1810,6 +1882,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_ReNeRF_Relightable_Neural_Radiance_Fields_with_Nearfield_Lighting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1835,6 +1908,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_MagicFusion_Boosting_Text-to-Image_Generation_Performance_by_Fusing_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13126", @@ -1860,6 +1934,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_PODIA-3D_Domain_Adaptation_of_3D_Generative_Model_Across_Large_Domain_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01900", @@ -1885,6 +1960,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Pluralistic_Aging_Diffusion_Autoencoder_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11086", @@ -1910,6 +1986,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Li_DPM-OT_A_New_Diffusion_Probabilistic_Model_Based_on_Optimal_Transport_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11308", @@ -1935,6 +2012,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gan_Efficient_Emotional_Adaptation_for_Audio-Driven_Talking-Head_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04946", @@ -1960,6 +2038,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ponglertnapakorn_DiFaReli_Diffusion_Face_Relighting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.09479", @@ -1985,6 +2064,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_TALL_Thumbnail_Layout_for_Deepfake_Video_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07494", @@ -2010,6 +2090,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_LAW-Diffusion_Complex_Scene_Generation_by_Diffusion_with_Layouts_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06713", @@ -2035,6 +2116,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Karras_DreamPose_Fashion_Video_Synthesis_with_Stable_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06025", @@ -2060,6 +2142,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/nupurkmr9/concept-ablation", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kumari_Ablating_Concepts_in_Text-to-Image_Diffusion_Models_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2303.13516", @@ -2085,6 +2168,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_DReg-NeRF_Deep_Registration_for_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09386", @@ -2110,6 +2194,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_The_Euclidean_Space_is_Evil_Hyperbolic_Attribute_Editing_for_Few-shot_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.12347", @@ -2135,6 +2220,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Schwartz_Discriminative_Class_Tokens_for_Text-to-Image_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17155", @@ -2160,6 +2246,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_General_Image-to-Image_Translation_with_One-Shot_Image_Guidance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14352", @@ -2185,6 +2272,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Text2Performer_Text-Driven_Human_Video_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.08483", @@ -2210,6 +2298,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hong_AesPA-Net_Aesthetic_Pattern-Aware_Style_Transfer_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09724", @@ -2235,6 +2324,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Han_Controllable_Person_Image_Synthesis_with_Pose-Constrained_Latent_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2260,6 +2350,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Motamed_PATMAT_Person_Aware_Tuning_of_Mask-Aware_Transformer_for_Face_Inpainting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06107", @@ -2285,6 +2376,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Virtual_Try-On_with_Pose-Garment_Keypoints_Guided_Inpainting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2310,6 +2402,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Online_Clustered_Codebook_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15139", @@ -2335,6 +2428,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_InfiniCity_Infinite-Scale_City_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.09637", @@ -2360,6 +2454,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_Make-It-3D_High-fidelity_3D_Creation_from_A_Single_Image_with_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14184", @@ -2385,6 +2480,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_SAMPLING_Scene-adaptive_Hierarchical_Multiplane_Images_Representation_for_Novel_View_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.06323", @@ -2410,6 +2506,7 
@@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ki_StyleLipSync_Style-based_Personalized_Lip-sync_Video_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.00521", @@ -2435,6 +2532,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_StyleInV_A_Temporal_Style_Modulated_Inversion_Network_for_Unconditional_Video_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16909", @@ -2460,6 +2558,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jo_3D-Aware_Generative_Model_for_Improved_Side-View_Image_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.10388", @@ -2485,6 +2584,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Zero-Shot_Contrastive_Loss_for_Text-Guided_Diffusion_Image_Style_Transfer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08622", @@ -2510,6 +2610,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Seo_FlipNeRF_Flipped_Reflection_Rays_for_Few-shot_Novel_View_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.17723", @@ -2535,6 +2636,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Prost_Inverse_Problem_Regularization_with_Hierarchical_Variational_Autoencoders_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11217", @@ -2560,6 +2662,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_3D-aware_Blending_with_Generative_NeRFs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.06608", @@ -2585,6 +2688,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_NeMF_Inverse_Volume_Rendering_with_Neural_Microflake_Field_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.00782", @@ -2610,6 +2714,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ge_Preserve_Your_Own_Correlation_A_Noise_Prior_for_Video_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.10474", @@ -2635,6 +2740,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_iVS-Net_Learning_Human_View_Synthesis_from_Internet_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2660,6 +2766,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_EGC_Image_Generation_and_Classification_via_a_Diffusion_Energy-Based_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02012", @@ -2685,6 +2792,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiao_Automatic_Animation_of_Hair_Blowing_in_Still_Portrait_Photos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14207", @@ -2710,6 +2818,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Karnewar_HoloFusion_Towards_Photo-realistic_3D_Generative_Modeling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14244", @@ -2735,6 +2844,7 @@ "modelscope": 
null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Foreground_Object_Search_by_Distilling_Composite_Image_Feature_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04990", @@ -2760,6 +2870,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_OrthoPlanes_A_Novel_Representation_for_Better_3D-Awareness_of_GANs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.15830", @@ -2785,6 +2896,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_3DHumanGAN_3D-Aware_Human_Image_Generation_with_3D_Pose_Mapping_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.07378", @@ -2810,6 +2922,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_MODA_Mapping-Once_Audio-driven_Portrait_Animation_with_Dual_Attentions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10008", @@ -2835,6 +2948,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Minimum_Latency_Deep_Online_Video_Stabilization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.02073", @@ -2860,6 +2974,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chai_StableVideo_Text-driven_Consistency-aware_Diffusion_Video_Editing_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09592", @@ -2885,6 +3000,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Patashnik_Localizing_Object-Level_Shape_Variations_with_Text-to-Image_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11306", @@ -2910,6 +3026,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hong_Implicit_Identity_Representation_Conditioned_Memory_Compensation_Network_for_Talking_Head_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09906", @@ -2935,6 +3052,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_ESSAformer_Efficient_Transformer_for_Hyperspectral_Image_Super-resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14010", @@ -2960,6 +3078,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qin_GlueGen_Plug_and_Play_Multi-modal_Encoders_for_X-to-image_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10056", @@ -2985,6 +3104,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_UHDNeRF_Ultra-High-Definition_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -3010,6 +3130,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_All-to-Key_Attention_for_Arbitrary_Style_Transfer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.04105", @@ -3035,6 +3156,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yildirim_Diverse_Inpainting_and_Editing_with_GAN_Inversion_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2307.15033", @@ -3060,6 +3182,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_MoTIF_Learning_Motion_Trajectories_with_Local_Implicit_Neural_Functions_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07988", @@ -3085,6 +3208,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Iqbal_RANA_Relightable_Articulated_Neural_Avatars_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.03237", @@ -3110,6 +3234,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_DiffCloth_Diffusion_Based_Garment_Synthesis_and_Manipulation_via_Structural_Cross-modal_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11206", @@ -3135,6 +3260,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/shgao/MDT", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_Masked_Diffusion_Transformer_is_a_Strong_Image_Synthesizer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14389", @@ -3160,6 +3286,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_FreeDoM_Training-Free_Energy-Guided_Conditional_Diffusion_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09833", @@ -3185,6 +3312,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_CLNeRF_Continual_Learning_Meets_NeRF_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -3210,6 +3338,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Chu_Rethinking_Fast_Fourier_Convolution_in_Image_Inpainting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -3235,6 +3364,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ceylan_Pix2Video_Video_Editing_using_Image_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12688", @@ -3260,6 +3390,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qiao_Multi-view_Spectral_Polarization_Propagation_for_Video_Glass_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -3285,6 +3416,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Le_Moing_WALDO_Future_Video_Synthesis_Using_Object_Layer_Decomposition_and_Parametric_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.14308", @@ -3310,6 +3442,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Ray_Conditioning_Trading_Photo-consistency_for_Photo-realism_in_Multi-view_Image_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.13681", @@ -3335,6 +3468,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Text-Conditioned_Sampling_Framework_for_Text-to-Image_Generation_with_Masked_Generative_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01515", @@ -3360,6 +3494,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Davtyan_Efficient_Video_Prediction_via_Sparsely_Conditioned_Flow_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.14575", diff --git a/json_data/low-level-and-physics-based-vision.json b/json_data/low-level-and-physics-based-vision.json index dbdc7d4..d550ae3 100644 --- a/json_data/low-level-and-physics-based-vision.json +++ b/json_data/low-level-and-physics-based-vision.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_Hierarchical_Contrastive_Learning_for_Pattern-Generalizable_Image_Corruption_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14061", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Miao_DDS2M_Self-Supervised_Denoising_Diffusion_Spatio-Spectral_Model_for_Hyperspectral_Image_Restoration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06682", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_From_Sky_to_the_Ground_A_Large-scale_Benchmark_and_Simple_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03867", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fu_VAPCNet_Viewpoint-Aware_3D_Point_Cloud_Completion_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_AccFlow_Backward_Accumulation_for_Long-Range_Optical_Flow_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13133", @@ -135,6 +140,7 @@ "modelscope": 
null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_Improving_Transformer-based_Image_Matching_by_Cascaded_Capturing_Spatially_Informative_Keypoints_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.02885", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Low-Light_Image_Enhancement_with_Multi-Stage_Residue_Quantization_and_Brightness-Aware_Attention_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pan_Random_Sub-Samples_Generation_for_Self-Supervised_Real_Image_Denoising_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16825", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ouyang_RSFNet_A_White-Box_Image_Retouching_Approach_using_Region-Specific_Color_Filters_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08682", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jaiswal_Physics-Driven_Turbulence_Image_Restoration_with_Stochastic_Refinement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10603", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gou_SYENet_A_Simple_Yet_Effective_Network_for_Multiple_Low-Level_Vision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08137", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Jang_Self-supervised_Image_Denoising_with_Downsampled_Invariance_Loss_and_Conditional_Blind-Spot_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.09507", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Variational_Degeneration_to_Structural_Refinement_A_Unified_Framework_for_Superimposed_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Reconstructed_Convolution_Module_Based_Look-Up_Tables_for_Efficient_Image_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08544", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_Self-supervised_Pre-training_for_Mirror_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Downscaled_Representation_Matters_Improving_Image_Rescaling_with_Collaborative_Downscaled_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.10643", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Varghese_Self-supervised_Monocular_Underwater_Depth_Recovery_Image_Restoration_and_a_Real-sea_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Ji_Rethinking_Video_Frame_Interpolation_from_Shutter_Mode_Induced_Degradation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ji_Single_Image_Deblurring_with_Row-dependent_Blur_Magnitude_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Multi-view_Self-supervised_Disentanglement_for_General_Image_Denoising_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05049", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Joint_Demosaicing_and_Deghosting_of_Time-Varying_Exposures_for_Single-Shot_HDR_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yi_Diff-Retinex_Rethinking_Low-light_Image_Enhancement_with_A_Generative_Diffusion_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13164", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Dual_Aggregation_Transformer_for_Image_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03364", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yoo_Video_Object_Segmentation-aware_Video_Frame_Interpolation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -610,6 
+634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zou_RawHDR_High_Dynamic_Range_Image_Reconstruction_from_a_Single_Raw_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02020", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Multi-Scale_Residual_Low-Pass_Filter_Network_for_Image_Deblurring_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dai_Indoor_Depth_Recovery_Based_on_Deep_Unfolding_with_Non-Local_Prior_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Learning_Correction_Filter_via_Degradation-Adaptive_Regression_for_Blind_Single_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_Learning_Non-Local_Spatial-Angular_Correlation_for_Light_Field_Image_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.08058", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Both_Diverse_and_Realism_Matter_Physical_Attribute_and_Style_Alignment_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Learned_Image_Reasoning_Prior_Penetrates_Deep_Unfolding_Network_for_Panchromatic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16083", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_The_Devil_is_in_the_Upsampling_Architectural_Decisions_Made_Simpler_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.11409", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_SimFIR_A_Simple_Framework_for_Fisheye_Image_Rectification_with_Self-supervised_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09040", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Exploring_Temporal_Frequency_Spectrum_in_Deep_Video_Deblurring_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_ExposureDiffusion_Learning_to_Expose_for_Low-light_Image_Enhancement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07710", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_High-Resolution_Document_Shadow_Removal_via_A_Large-Scale_Real-World_Dataset_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14221", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Duan_Towards_Saner_Deep_Image_Registration_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2307.09696", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_VideoFlow_Exploiting_Temporal_Cues_for_Multi-frame_Optical_Flow_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08340", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_Scene_Matters_Model-based_Deep_Video_Compression_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.04557", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cho_Non-Coaxial_Event-Guided_Motion_Deblurring_with_Spatial_Alignment_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_Retinexformer_One-stage_Retinex-based_Transformer_for_Low-light_Image_Enhancement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06705", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Feature_Modulation_Transformer_Cross-Refinement_of_Global_Representation_via_High-Frequency_Prior_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05022", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_MVPSNet_Fast_Generalizable_Multi-view_Photometric_Stereo_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.11167", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_FSI_Frequency_and_Spatial_Interactive_Learning_for_Image_Restoration_in_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Spherical_Space_Feature_Decomposition_for_Guided_Depth_Map_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08942", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Empowering_Low-Light_Image_Enhancer_through_Customized_Learnable_Priors_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01958", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Learning_Image_Harmonization_in_the_Linear_Color_Space_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Song_Under-Display_Camera_Image_Restoration_with_Scattering_Effect_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04163", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Iterative_Soft_Shrinkage_Learning_for_Efficient_Image_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09650", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Quan_Single_Image_Defocus_Deblurring_via_Implicit_Neural_Inverse_Kernels_ICCV_2023_paper.pdf", "paper_arxiv_id": 
null, @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_Degradation-Resistant_Unfolding_Network_for_Heterogeneous_Image_Fusion_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Seo_Graphics2RAW_Mapping_Computer_Graphics_Images_to_Sensor_RAW_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Lighting_up_NeRF_via_Unsupervised_Decomposition_and_Enhancement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10664", @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_Unsupervised_Image_Denoising_in_Real-World_Scenarios_via_Self-Collaboration_Parallel_Generative_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06776", @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_Adverse_Weather_Removal_with_Codebook_Priors_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_MSRA-SR_Image_Super-resolution_Transformer_with_Multi-scale_Shared_Representation_Acquisition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Quan_Deep_Video_Demoireing_via_Compact_Invertible_Dyadic_Decomposition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_SILT_Shadow-Aware_Iterative_Label_Tuning_for_Learning_to_Detect_Shadows_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12064", @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Innovating_Real_Fisheye_Image_Correction_with_Dual_Diffusion_Architecture_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1485,6 +1544,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Adaptive_Illumination_Mapping_for_Shadow_Detection_in_Raw_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1510,6 +1570,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_GEDepth_Ground_Embedding_for_Monocular_Depth_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.09975", @@ -1535,6 +1596,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Lightweight_Image_Super-Resolution_with_Superpixel_Token_Interaction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1560,6 +1622,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Unfolding_Framework_with_Prior_of_Convolution-Transformer_Mixture_and_Uncertainty_Estimation_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2306.11316", @@ -1585,6 +1648,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Efficient_Unified_Demosaicing_for_Bayer_and_Non-Bayer_Patterned_Image_Sensors_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10667", @@ -1610,6 +1674,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chung_LAN-HDR_Luminance-based_Alignment_Network_for_High_Dynamic_Range_Video_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11116", @@ -1635,6 +1700,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Niu_Fine-grained_Visible_Watermark_Removal_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1660,6 +1726,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_SRFormer_Permuted_Self-Attention_for_Single_Image_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09735", @@ -1685,6 +1752,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_DLGSANet_Lightweight_Dynamic_Local_and_Global_Self-Attention_Networks_for_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.02031", @@ -1710,6 +1778,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qiu_MB-TaylorFormer_Multi-Branch_Efficient_Transformer_Expanded_by_Taylor_Formula_for_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14036", @@ -1735,6 +1804,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, 
"demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Multi-Frequency_Representation_Enhancement_with_Privilege_Information_for_Video_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1760,6 +1830,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Park_COMPASS_High-Efficiency_Deep_Image_Compression_with_Arbitrary-scale_Spatial_Scalability_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.07926", @@ -1785,6 +1856,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tel_Alignment-free_HDR_Deghosting_with_Semantics_Consistent_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.18135", @@ -1810,6 +1882,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zubic_From_Chaos_Comes_Order_Ordering_Event_Representations_for_Object_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.13455", @@ -1835,6 +1908,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fu_Towards_High-Quality_Specular_Highlight_Removal_by_Leveraging_Large-Scale_Synthetic_Data_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.06302", @@ -1860,6 +1934,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yoshimura_DynamicISP_Dynamically_Controlled_Image_Signal_Processor_for_Image_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.01146", @@ -1885,6 +1960,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Fu_Dancing_in_the_Dark_A_Benchmark_towards_General_Low-light_Video_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1910,6 +1986,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shen_Dec-Adapter_Exploring_Efficient_Decoder-Side_Adapter_for_Bridging_Screen_Content_and_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1935,6 +2012,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_OmniZoomer_Learning_to_Move_and_Zoom_in_on_Sphere_at_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08114", @@ -1960,6 +2038,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_Pyramid_Dual_Domain_Injection_Network_for_Pan-sharpening_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1985,6 +2064,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Implicit_Neural_Representation_for_Cooperative_Low-light_Image_Enhancement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11722", @@ -2010,6 +2090,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ershov_Physically-Plausible_Illumination_Distribution_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2035,6 +2116,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_Score_Priors_Guided_Deep_Variational_Inference_for_Unsupervised_Real-World_Single_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04682", @@ 
-2060,6 +2142,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Semantic-Aware_Dynamic_Parameter_for_Video_Inpainting_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2085,6 +2168,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Pixel_Adaptive_Deep_Unfolding_Transformer_for_Hyperspectral_Image_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10820", @@ -2110,6 +2194,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Improving_Lens_Flare_Removal_with_General-Purpose_Pipeline_and_Multiple_Light_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16460", @@ -2135,6 +2220,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_RFD-ECNet_Extreme_Underwater_Image_Compression_with_Reference_to_Feature_Dictionary_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08721", @@ -2160,6 +2246,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Learning_Continuous_Exposure_Value_Representations_for_Single-Image_HDR_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03900", @@ -2185,6 +2272,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cui_Focal_Network_for_Image_Restoration_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2210,6 +2298,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_CIRI_Curricular_Inactivation_for_Residue-aware_One-shot_Video_Inpainting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2235,6 +2324,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Beyond_Image_Borders_Learning_Feature_Extrapolation_for_Unbounded_Image_Composition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.12042", @@ -2260,6 +2350,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yin_MetaF2N_Blind_Image_Super-Resolution_by_Learning_Efficient_Model_Adaptation_from_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08113", @@ -2285,6 +2376,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Boundary-Aware_Divide_and_Conquer_A_Diffusion-Based_Solution_for_Unsupervised_Shadow_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2310,6 +2402,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Leveraging_Inpainting_for_Single-Image_Shadow_Removal_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.05361", @@ -2335,6 +2428,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lai_Hybrid_Spectral_Denoising_Transformer_with_Guided_Attention_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09040", @@ -2360,6 +2454,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tedla_Examining_Autoexposure_for_Challenging_Scenes_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2309.04542", @@ -2385,6 +2480,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shang_Self-supervised_Learning_to_Bring_Dual_Reversed_Rolling_Shutter_Images_Alive_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.19862", @@ -2410,6 +2506,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xia_DiffIR_Efficient_Diffusion_Model_for_Image_Restoration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09472", @@ -2435,6 +2532,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Sparse_Sampling_Transformer_with_Uncertainty-Driven_Ranking_for_Unified_Removal_of_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14153", @@ -2460,6 +2558,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_LMR_A_Large-Scale_Multi-Reference_Dataset_for_Reference-Based_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.04970", @@ -2485,6 +2584,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Low-Light_Image_Enhancement_with_Illumination-Aware_Gamma_Correction_and_Complete_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08220", @@ -2510,6 +2610,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_Single_Image_Reflection_Separation_via_Component_Synergy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10027", @@ -2535,6 +2636,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": 
null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Learning_Rain_Location_Prior_for_Nighttime_Deraining_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2560,6 +2662,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Choi_Exploring_Positional_Characteristics_of_Dual-Pixel_Data_for_Camera_Autofocus_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2585,6 +2688,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ko_Continuously_Masked_Transformer_for_Image_Inpainting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2610,6 +2714,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tuo_Learning_Data-Driven_Vector-Quantized_Degradation_Model_for_Animation_Video_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09826", @@ -2635,6 +2740,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Spatially-Adaptive_Feature_Modulation_for_Efficient_Image_Super-Resolution_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.13800", @@ -2660,6 +2766,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Video_Adverse-Weather-Component_Suppression_Network_via_Weather_Messenger_and_Adversarial_Backpropagation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.13700", @@ -2685,6 +2792,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Snow_Removal_in_Video_A_New_Dataset_and_A_Novel_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2710,6 +2818,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Boosting_Single_Image_Super-Resolution_via_Partial_Channel_Shifting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2735,6 +2844,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Towards_Real-World_Burst_Image_Super-Resolution_Benchmark_and_Method_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04803", @@ -2760,6 +2870,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_On_the_Effectiveness_of_Spectral_Discriminators_for_Perceptual_Quality_Improvement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12027", @@ -2785,6 +2896,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qi_E2NeRF_Event_Enhanced_Neural_Radiance_Fields_from_Blurry_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2810,6 +2922,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zou_Iterative_Denoiser_and_Noise_Estimator_for_Self-Supervised_Image_Denoising_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2835,6 +2948,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jin_Lighting_Every_Darkness_in_Two_Pairs_A_Calibration-Free_Pipeline_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03448", @@ 
-2860,6 +2974,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Quan_Fingerprinting_Deep_Image_Restoration_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/low-level-vision-and-theory.json b/json_data/low-level-vision-and-theory.json index ab53e51..401b735 100644 --- a/json_data/low-level-vision-and-theory.json +++ b/json_data/low-level-vision-and-theory.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_A_5-Point_Minimal_Solver_for_Event_Camera_Relative_Motion_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dibene_General_Planar_Motion_from_a_Pair_of_3D_Correspondences_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bolduc_Beyond_the_Pixel_a_Photometrically_Calibrated_HDR_Dataset_for_Luminance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.12372", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_DDFM_Denoising_Diffusion_Model_for_Multi-Modality_Image_Fusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06840", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_Iterative_Prompt_Learning_for_Unsupervised_Backlit_Image_Enhancement_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2303.17569", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_Similarity_Min-Max_Zero-Shot_Day-Night_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08779", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Multi-interactive_Feature_Learning_and_a_Full-time_Multi-modality_Benchmark_for_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.02097", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Klotz_Computational_3D_Imaging_with_Position_Sensors_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Passive_Ultra-Wideband_Single-Photon_Imaging_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Arrigoni_Viewing_Graph_Solvability_in_Practice_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ding_Minimal_Solutions_to_Generalized_Three-View_Relative_Pose_Problem_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Sundar_SoDaCam_Software-defined_Cameras_via_Single-Photon_Imaging_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.00066", diff --git a/json_data/machine-learning-and-dataset.json b/json_data/machine-learning-and-dataset.json index ce2c606..918b42f 100644 --- a/json_data/machine-learning-and-dataset.json +++ b/json_data/machine-learning-and-dataset.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_DiffusionDet_Diffusion_Model_for_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.09788", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_V3Det_Vast_Vocabulary_Visual_Detection_Dataset_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.03752", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_PointOdyssey_A_Large-Scale_Synthetic_Dataset_for_Long-Term_Point_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15055", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cho_Label-Free_Event-based_Object_Recognition_via_Joint_Learning_with_Image_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09383", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_Vision_HGNN_An_Image_is_More_than_a_Graph_of_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chang_Revisiting_Vision_Transformer_from_the_View_of_Path_Ensemble_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06548", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ning_All_in_Tokens_Unifying_Output_Space_of_Visual_Tasks_via_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.02229", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Mitigating_and_Evaluating_Static_Bias_of_Action_Representations_in_the_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.12883", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_Deep_Multitask_Learning_with_Progressive_Parameter_Sharing_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tu_Implicit_Temporal_Modeling_with_Learnable_Alignment_for_Video_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.10465", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Unmasked_Teacher_Towards_Training-Efficient_Video_Foundation_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16058", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Large-Scale_Person_Detection_and_Localization_Using_Overhead_Fisheye_Cameras_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2307.08252", diff --git a/json_data/machine-learning-other-than-deep-learning.json b/json_data/machine-learning-other-than-deep-learning.json index 812b9e4..85e0108 100644 --- a/json_data/machine-learning-other-than-deep-learning.json +++ b/json_data/machine-learning-other-than-deep-learning.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zou_Adaptive_Calibrator_Ensemble_Navigating_Test_Set_Difficulty_in_Out-of-Distribution_Scenarios_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ji_Anchor_Structure_Regularization_Induced_Multi-view_Subspace_Clustering_via_Enhanced_Tensor_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Meta_OOD_Learning_For_Continuously_Adaptive_OOD_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11705", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_Learning_with_Diversity_Self-Expanded_Equalization_for_Better_Generalized_Deep_Metric_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Bold_but_Cautious_Unlocking_the_Potential_of_Personalized_Federated_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11103", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_Federated_Learning_Over_Images_Vertical_Decompositions_and_Pre-Trained_Backbones_Are_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03237", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Deng_Towards_Inadequately_Pre-trained_Models_in_Transfer_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2203.04668", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Do_Reducing_Training_Time_in_Cross-Silo_Federated_Learning_Using_Multigraph_Topology_ICCV_2023_paper.pdf", "paper_arxiv_id": "2207.09657", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Membrane_Potential_Batch_Normalization_for_Spiking_Neural_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08359", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guan_Revisit_PCA-based_Technique_for_Out-of-Distribution_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Cross-view_Topology_Based_Consistent_and_Complementary_Information_for_Deep_Multi-view_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/medical-and-biological-vision-cell-microscopy.json b/json_data/medical-and-biological-vision-cell-microscopy.json index cd71da2..4344304 100644 --- a/json_data/medical-and-biological-vision-cell-microscopy.json +++ 
b/json_data/medical-and-biological-vision-cell-microscopy.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nakhli_CO-PILOT_Dynamic_Top-Down_Point_Cloud_with_Conditional_Neighborhood_Aggregation_for_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_SKiT_a_Fast_Key_Information_Video_Transformer_for_Online_Surgical_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_XNet_Wavelet-Based_Low_and_High_Frequency_Fusion_Networks_for_Fully-_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Schmidt_Probabilistic_Modeling_of_Inter-_and_Intra-observer_Variability_in_Medical_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11397", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Learning_Cross-Representation_Affinity_Consistency_for_Sparsely_Supervised_Biomedical_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Dual_Meta-Learning_with_Longitudinally_Consistent_Regularization_for_One-Shot_Brain_Tissue_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06774", @@ -160,6 +166,7 @@ "modelscope": null, 
"gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jeong_BlindHarmony_Blind_Harmonization_for_MR_Images_via_Flow_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.10732", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ji_Continual_Segment_Towards_a_Single_Unified_and_Non-forgetting_Continual_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_CLIP-Driven_Universal_Model_for_Organ_Segmentation_and_Tumor_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.00785", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dawidowicz_LIMITR_Leveraging_Local_Information_for_Medical_Image-Text_Representation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11755", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_Taxonomy_Adaptive_Cross-Domain_Adaptation_in_Medical_Imaging_via_Optimization_Trajectory_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14709", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_CuNeRF_Cube-Based_Neural_Radiance_Field_for_Zero-Shot_Medical_Image_Arbitrary-Scale_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16242", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Learning_to_Distill_Global_Representation_for_Sparse-View_CT_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08463", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Preserving_Tumor_Volumes_for_Unsupervised_Medical_Image_Registration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.10153", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ashesh_uSplit_Image_Decomposition_for_Fluorescence_Microscopy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.12872", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Rethinking_Multi-Contrast_MRI_Super-Resolution_Rectangle-Window_Cross-Attention_Transformer_and_Arbitrary-Scale_Upsampling_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Multimodal_Optimal_Transport-based_Co-Attention_Transformer_with_Global_Structure_Consistency_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.08330", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yuan_4D_Myocardium_Reconstruction_with_Decoupled_Motion_and_Shape_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14083", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Wolf_Unsupervised_Learning_of_Object-Centric_Embeddings_for_Cell_Instance_Segmentation_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.08501", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Rodriguez-Puigvert_LightDepth_Single-View_Depth_Self-Supervision_from_Illumination_Decline_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10525", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_BoMD_Bag_of_Multi-label_Descriptors_for_Noisy_Chest_X-ray_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2203.01937", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lei_Decomposition-Based_Variational_Network_for_Multi-Contrast_MRI_Super-Resolution_and_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_TopoSeg_Topology-Aware_Nuclear_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qiu_Scratch_Each_Others_Back_Incomplete_Multi-Modal_Brain_Tumor_Segmentation_via_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_CancerUniT_Towards_a_Single_Unified_Model_for_Effective_Detection_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.12291", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qiu_Gram-based_Attentive_Neural_Ordinary_Differential_Equations_Network_for_Video_Nystagmography_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_ConSlide_Asynchronous_Hierarchical_Interaction_Transformer_with_Breakup-Reorganize_Rehearsal_for_Continual_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13324", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_PRIOR_Prototype_Representation_Joint_Learning_from_Medical_Images_and_Reports_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12577", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_MedKLIP_Medical_Knowledge_Enhanced_Language-Image_Pre-Training_for_X-ray_Diagnosis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.02228", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Affine-Consistent_Transformer_for_Multi-Class_Cell_Nuclei_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.14154", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Menten_A_Skeletonization_Algorithm_for_Gradient-Based_Optimization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02527", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Improving_Representation_Learning_for_Histopathologic_Images_with_Cluster_Constraints_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.12334", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Konwer_Enhancing_Modality-Agnostic_Representations_via_Meta-Learning_for_Brain_Tumor_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.04308", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Miao_CauSSL_Causality-inspired_Semi-supervised_Learning_for_Medical_Image_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Butoi_UniverSeg_Universal_Medical_Image_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06131", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_MRM_Masked_Relation_Modeling_for_Medical_Image_Pre-Training_with_Genetics_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Qu_Boosting_Whole_Slide_Image_Classification_from_the_Perspectives_of_Distribution_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pan_Adaptive_Template_Transformer_for_Mitochondria_Segmentation_in_Electron_Microscopy_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Cross-Modal_Translation_and_Alignment_for_Survival_Analysis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.12855", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shao_LNPL-MIL_Learning_from_Noisy_Pseudo_Labels_for_Promoting_Multiple_Instance_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/motion-estimation-matching-and-tracking.json b/json_data/motion-estimation-matching-and-tracking.json index 7cb3f29..c778434 100644 --- a/json_data/motion-estimation-matching-and-tracking.json +++ b/json_data/motion-estimation-matching-and-tracking.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/Mathux/TMR", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Petrovich_TMR_Text-to-Motion_Retrieval_Using_Contrastive_3D_Human_Motion_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.00976", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Sequential_Texts_Driven_Cohesive_Motions_Synthesis_with_Natural_Transitions_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Auxiliary_Tasks_Benefit_3D_Skeleton-based_Human_Motion_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08942", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Deng_Explicit_Motion_Disentangling_for_Efficient_Optical_Flow_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mancusi_TrackFlow_Multi-Object_tracking_with_Normalizing_Flows_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11513", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_HumanMAC_Masked_Motion_Completion_for_Human_Motion_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.03665", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Geometrized_Transformer_for_Self-Supervised_Homography_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yuan_SemARFlow_Injecting_Semantics_into_Unsupervised_Optical_Flow_Estimation_for_Autonomous_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06209", @@ 
-210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pakulev_NeSS-ST_Detecting_Good_and_Stable_Keypoints_with_a_Neural_Stability_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_Robust_Object_Modeling_for_Visual_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05140", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tanke_Social_Diffusion_Long-term_Multiple_Human_Motion_Anticipation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kang_Exploring_Lightweight_Hierarchical_Vision_Transformers_for_Efficient_Visual_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06904", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Aliakbarian_HMD-NeMo_Online_3D_Avatar_Motion_Generation_From_Sparse_Observations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11261", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Learning_Fine-Grained_Features_for_Pixel-Wise_Video_Correspondences_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03040", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_GAFlow_Incorporating_Gaussian_Attention_into_Optical_Flow_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_Occ2Net_Robust_Image_Matching_Based_on_3D_Occupancy_Estimation_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16160", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Locomotion-Action-Manipulation_Synthesizing_Human-Scene_Interactions_in_Complex_3D_Environments_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.02667", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_Trajectory_Unified_Transformer_for_Pedestrian_Trajectory_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_TMA_Temporal_Motion_Aggregation_for_Event-based_Optical_Flow_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11629", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Paredes-Valles_Taming_Contrast_Maximization_for_Learning_Sequential_Low-latency_Event-based_Optical_Flow_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Pautrat_GlueStick_Robust_Image_Matching_by_Sticking_Points_and_Lines_Together_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02008", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Segu_DARTH_Holistic_Test-time_Adaptation_for_Multiple_Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Santellani_S-TREK_Sequential_Translation_and_Rotation_Equivariant_Keypoints_for_Local_Feature_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14598", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Integrating_Boxes_and_Masks_A_Multi-Object_Framework_for_Unified_Visual_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13266", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Delattre_Robust_Frame-to-Frame_Camera_Rotation_Estimation_in_Crowded_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08588", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Sparse_Instance_Conditioned_Multimodal_Trajectory_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_PoseDiffusion_Solving_Pose_Estimation_via_Diffusion-aided_Bundle_Adjustment_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2306.15667", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ding_3DMOTFormer_Graph_Transformer_for_Online_3D_Multi-Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06635", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Maeda_Fast_Inference_and_Update_of_Probabilistic_Density_Estimation_on_Trajectory_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08824", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Supervised_Homography_Learning_with_Realistic_Dataset_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15353", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Joint-Relation_Transformer_for_Multi-Person_Motion_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04808", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ponghiran_Event-based_Temporally_Dense_Optical_Flow_Estimation_with_Sequential_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.01244", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_3D_Motion_Magnification_Visualizing_Subtle_Motions_from_Time-Varying_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03757", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": 
null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_Learning_Optical_Flow_from_Event_Camera_with_Rendered_Dataset_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11011", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tran_Persistent-Transient_Duality_A_Multi-Mechanism_Approach_for_Modeling_Human-Object_Interaction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12729", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_Deep_Homography_Mixture_for_Single_Image_Rolling_Shutter_Correction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Fast_Neural_Scene_Flow_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.09121", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nie_RLSAC_Reinforcement_Learning_Enhanced_Sample_Consensus_for_End-to-End_Robust_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05318", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_MeMOTR_Long-Term_Memory-Augmented_Transformer_for_Multi-Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15700", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_MBPTrack_Improving_3D_Point_Cloud_Tracking_with_Memory_Networks_and_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2303.05071", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cui_SportsMOT_A_Large_Multi-Object_Tracking_Dataset_in_Multiple_Sports_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.05170", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Heterogeneous_Diversity_Driven_Active_Learning_for_Multi-Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gong_TM2D_Bimodality_Driven_3D_Dance_Generation_via_Music-Text_Integration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02419", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Synchronize_Feature_Extracting_and_Matching_A_Single_Branch_Framework_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12549", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Collaborative_Tracking_Learning_for_Frame-Rate-Insensitive_Multi-Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05911", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_CiteTracker_Correlating_Image_and_Text_for_Visual_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11322", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Athanasiou_SINC_Spatial_Composition_of_3D_Human_Motions_for_Simultaneous_Action_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.10417", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Uncertainty-aware_Unsupervised_Multi-Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15409", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_PVT_A_Simple_End-to-End_Latency-Aware_Visual_Tracking_Framework_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bae_EigenTrajectory_Low-Rank_Descriptors_for_Multi-Modal_Trajectory_Forecasting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09306", @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wan_RPEFlow_Multimodal_Fusion_of_RGB-PointCloud-Event_for_Joint_Optical_Flow_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.15082", @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_Multi-Scale_Bidirectional_Recurrent_Network_with_Hybrid_Correlation_for_Point_Cloud_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_ReST_A_Reconfigurable_Spatial-Temporal_Graph_Model_for_Multi-Camera_Multi-Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13229", @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Doersch_TAPIR_Tracking_Any_Point_with_Per-Frame_Initialization_and_Temporal_Refinement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.08637", @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_IHNet_Iterative_Hierarchical_Network_Guided_by_High-Resolution_Estimated_Information_for_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ng_Can_Language_Models_Learn_to_Listen_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10897", @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lai_XVO_Generalized_Visual_Odometry_via_Cross-Modal_Self-Training_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Schmalfuss_Distracting_Downpour_Adversarial_Weather_Attacks_for_Motion_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.06716", @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Foreground-Background_Distribution_Modeling_Transformer_for_Visual_Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/multimodal-learning.json b/json_data/multimodal-learning.json index 3fef37f..b6513f5 100644 --- a/json_data/multimodal-learning.json +++ b/json_data/multimodal-learning.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhai_SLAN_Self-Locator_Aided_Network_for_Vision-Language_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.16208", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Long_Task-Oriented_Multi-Modal_Mutual_Leaning_for_Vision-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17169", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_TinyCLIP_CLIP_Distillation_via_Affinity_Mimicking_and_Weight_Inheritance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.12314", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shvetsova_In-Style_Bridging_Text_and_Uncurated_Videos_with_Style_Transfer_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08928", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Swetha_Preserving_Modality_Structure_Improves_Multi-Modal_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13077", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, 
"demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cho_Distribution-Aware_Prompt_Tuning_for_Vision-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03406", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qin_SupFusion_Supervised_LiDAR-Camera_Fusion_for_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.07084", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Distribution-Consistent_Modal_Recovering_for_Incomplete_Multimodal_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Fg-T2M_Fine-Grained_Text-Driven_Human_Motion_Generation_via_Diffusion_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.06284", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Cross-Modal_Orthogonal_High-Rank_Augmentation_for_RGB-Event_Transformer-Trackers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.04129", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shukor_eP-ALM_Efficient_Perceptual_Augmentation_of_Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11403", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Generating_Visual_Scenes_from_Touch_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2309.15117", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Multimodal_High-order_Relation_Transformer_for_Scene_Boundary_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chiquier_Muscles_in_Action_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.02978", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_Self-Evolved_Dynamic_Expansion_Model_for_Task-Free_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Multi-Event_Video-Text_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11551", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Referring_Image_Segmentation_Using_Text_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14575", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Audio-Visual_Deception_Detection_DOLOS_Dataset_and_Parameter-Efficient_Crossmodal_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12745", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Tan_EMMN_Emotional_Motion_Memory_Network_for_Audio-driven_Emotional_Talking_Face_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_CLIP2Point_Transfer_CLIP_to_Point_Cloud_Classification_with_Image-Depth_Pre-Training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.01055", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Speech2Lip_High-fidelity_Speech_to_Lip_Generation_by_Learning_from_a_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04814", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Deng_GrowCLIP_Data-Aware_Automatic_Model_Growing_for_Large-scale_Contrastive_Language-Image_Pre-Training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11331", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_A_Retrospect_to_Multi-prompt_Learning_across_Vision_and_Language_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_ChartReader_A_Unified_Framework_for_Chart_Derendering_and_Comprehension_without_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02173", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Boosting_Multi-modal_Model_Performance_with_Adaptive_Gradient_Modulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07686", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Varma_ViLLA_Fine-Grained_Vision-Language_Representation_Learning_from_Real-World_Data_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11194", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Robust_Referring_Video_Object_Segmentation_with_Cyclic_Structural_Consensus_ICCV_2023_paper.pdf", "paper_arxiv_id": "2207.01203", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Fantasia3D_Disentangling_Geometry_and_Appearance_for_High-quality_Text-to-3D_Content_Creation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13873", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_CTPTowards_Vision-Language_Continual_Pretraining_via_Compatible_Momentum_Contrast_and_Topology_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07146", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xuan_Narrator_Towards_Natural_Control_of_Human-Scene_Interaction_Generation_via_Relationship_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09410", diff --git a/json_data/navigation-and-autonomous-driving.json b/json_data/navigation-and-autonomous-driving.json index 3128570..af0c1bf 100644 --- 
a/json_data/navigation-and-autonomous-driving.json +++ b/json_data/navigation-and-autonomous-driving.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gasperini_Robust_Monocular_Depth_Estimation_under_Challenging_Conditions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09711", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_UMC_A_Unified_Bandwidth-efficient_and_Multi-resolution_based_Collaborative_Perception_Framework_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12400", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_View_Consistent_Purification_for_Accurate_Cross-View_Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08110", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiao_Semi-supervised_Semantics-guided_Adversarial_Training_for_Robust_Trajectory_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2205.14230", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Deng_NeRF-LOAM_Neural_Implicit_Representation_for_Large-Scale_Incremental_LiDAR_Odometry_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10709", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_MapPrior_Birds-Eye_View_Map_Layout_Estimation_with_Generative_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12963", 
@@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jaeger_Hidden_Biases_of_End-to-End_Driving_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.07957", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dang_Search_for_or_Navigate_to_Dual_Adaptive_Thinking_for_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.00553", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_BiFF_Bi-level_Future_Fusion_with_Polyline-based_Coordinate_for_Interactive_Trajectory_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.14161", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Manivasagam_Towards_Zero_Domain_Gap_A_Comprehensive_Study_of_Realistic_LiDAR_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_Clustering_based_Point_Cloud_Representation_Learning_for_3D_Analysis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14605", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Aydemir_ADAPT_Efficient_Multi-Agent_Trajectory_Prediction_with_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14187", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_MV-DeepSDF_Implicit_Modeling_with_Multi-Sweep_Point_Clouds_for_3D_Vehicle_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16715", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_Learning_Vision-and-Language_Navigation_from_YouTube_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11984", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_TrajPAC_Towards_Robustness_Verification_of_Pedestrian_Trajectory_Prediction_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05985", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_VAD_Vectorized_Scene_Representation_for_Efficient_Autonomous_Driving_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12077", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Traj-MAE_Masked_Autoencoders_for_Trajectory_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06697", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yao_Sparse_Point_Guided_3D_Lane_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_A_Simple_Vision_Transformer_for_Weakly_Semi-supervised_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pourkeshavarz_Learn_TAROT_with_MENTOR_A_Meta-Learned_Self-Supervised_Approach_for_Trajectory_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_FocalFormer3D_Focusing_on_Hard_Instance_for_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04556", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tong_Scene_as_Occupancy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.02851", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Real-Time_Neural_Rasterization_for_Large_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2311.05607", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Belder_A_Game_of_Bundle_Adjustment_-_Learning_Efficient_Convergence_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13270", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_Efficient_Transformer-based_3D_Object_Detection_with_Dynamic_Token_Halting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05078", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_RegFormer_An_Efficient_Projection-Aware_Transformer_Network_for_Large-Scale_Point_Cloud_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12384", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xia_CASSPR_Cross_Attention_Single_Scan_Place_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.12542", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jin_Recursive_Video_Lane_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11106", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Parametric_Depth_Based_Feature_Representation_Learning_for_Object_Detection_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.04106", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_SHIFT3D_Synthesizing_Hard_Inputs_For_Tricking_3D_Detectors_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05810", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_Bootstrap_Motion_Forecasting_With_Self-Consistent_Constraints_ICCV_2023_paper.pdf", "paper_arxiv_id": "2204.05859", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Klinghoffer_Towards_Viewpoint_Robustness_in_Birds_Eye_View_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05192", @@ -810,6 +842,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Choi_R-Pred_Two-Stage_Motion_Prediction_Via_Tube-Query_Attention-Based_Trajectory_Refinement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.08609", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_INT2_Interactive_Trajectory_Prediction_at_Intersections_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_MatrixVT_Efficient_Multi-Camera_to_BEV_Transformation_for_3D_Perception_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.10593", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Unsupervised_Self-Driving_Attention_Prediction_via_Uncertainty_Mining_and_Knowledge_Embedding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09706", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_SVQNet_Sparse_Voxel-Adjacent_Query_Network_for_4D_Spatio-Temporal_LiDAR_Semantic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13323", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Seff_MotionLM_Multi-Agent_Motion_Forecasting_as_Language_Modeling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16534", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Can_Improving_Online_Lane_Graph_Extraction_by_Object-Lane_Clustering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10947", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Najibi_Unsupervised_3D_Perception_with_2D_Vision-Language_Distillation_for_Autonomous_Driving_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14491", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_Self-Supervised_Monocular_Depth_Estimation_by_Direction-aware_Cumulative_Convolution_Network_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05605", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Agarwal_Ordered_Atomic_Activity_for_Fine-grained_Interactive_Traffic_Scenario_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_DistillBEV_Boosting_Multi-Camera_3D_Object_Detection_with_Cross-Modal_Knowledge_Distillation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.15109", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Video_Task_Decathlon_Unifying_Image_and_Video_Tasks_in_Autonomous_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04422", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_MV-Map_Offboard_HD-Map_Generation_with_Multi-view_Consistency_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.08851", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Towards_Universal_LiDAR-Based_3D_Object_Detection_by_Multi-Domain_Knowledge_Transfer_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_Forecast-MAE_Self-supervised_Pre-training_for_Motion_Forecasting_with_Masked_Autoencoders_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09882", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qin_UniFusion_Unified_Multi-View_Fusion_Transformer_for_Spatial-Temporal_Representation_in_Birds-Eye-View_ICCV_2023_paper.pdf", "paper_arxiv_id": "2207.08536", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_BEVPlace_Learning_LiDAR-based_Place_Recognition_using_Birds_Eye_View_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.14325", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_CORE_Cooperative_Reconstruction_for_Multi-Agent_Perception_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11514", @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Ge_MetaBEV_Solving_Sensor_Failures_for_3D_Detection_and_Map_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.09801", diff --git a/json_data/neural-generative-models.json b/json_data/neural-generative-models.json index 8349fab..5837598 100644 --- a/json_data/neural-generative-models.json +++ b/json_data/neural-generative-models.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Unsupervised_Compositional_Concepts_Discovery_with_Text-to-Image_Generative_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.05357", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Human_Preference_Score_Better_Aligning_Text-to-Image_Models_with_Human_Preference_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14420", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Levi_DLT_Conditioned_layout_generation_with_Joint_Discrete-Continuous_Diffusion_Layout_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.03755", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Van_Le_Anti-DreamBooth_Protecting_Users_from_Personalized_Text-to-image_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15433", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tyszkiewicz_GECCO_Geometrically-Conditioned_Point_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05916", @@ -135,6 +140,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_DiffDreamer_Towards_Consistent_Unsupervised_Single-view_Scene_Extrapolation_with_Conditional_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.12131", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Karunratanakul_Guided_Motion_Diffusion_for_Controllable_Human_Motion_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.12577", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_COOP_Decoupling_and_Coupling_of_Whole-Body_Grasping_Pose_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Couairon_Zero-Shot_Spatial_Layout_Conditioning_for_Text-to-Image_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.13754", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Alanov_StyleDomain_Efficient_and_Lightweight_Parameterizations_of_StyleGAN_for_One-shot_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.10229", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiang_GRAM-HD_3D-Consistent_Image_Generation_at_High_Resolution_with_Generative_Radiance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.07255", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": 
null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Your_Diffusion_Model_is_Secretly_a_Zero-Shot_Classifier_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16203", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cui_Learning_Hierarchical_Features_with_Joint_Latent_Space_Energy-Based_Prior_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.09604", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_ActFormer_A_GAN-based_Transformer_towards_General_Action-Conditioned_3D_Human_Motion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2203.07706", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Landscape_Learning_for_Neural_Network_Inversion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.09027", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Everaert_Diffusion_in_Style_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chou_Diffusion-SDF_Conditional_Generative_Modeling_of_Signed_Distance_Functions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.13757", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_GETAvatar_Generative_Textured_Meshes_for_Animatable_Human_Avatars_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Agarwal_A-STAR_Test-time_Attention_Segregation_and_Retention_for_Text-to-image_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.14544", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_TF-ICON_Diffusion-Based_Training-Free_Cross-Domain_Image_Composition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12493", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qian_Breaking_The_Limits_of_Text-conditioned_3D_Motion_Synthesis_with_Elaborative_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Barquero_BeLFusion_Latent_Diffusion_for_Behavior-Driven_Human_Motion_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.14304", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hertz_Delta_Denoising_Score_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.07090", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Mimic3D_Thriving_3D-Aware_GANs_via_3D-to-2D_Imitation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09036", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Raj_DreamBooth3D_Subject-Driven_Text-to-3D_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13508", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Song_Feature_Proliferation_--_the_Cancer_in_StyleGAN_and_its_Treatments_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.08921", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kicanaoglu_Unsupervised_Facial_Performance_Editing_via_Vector-Quantized_StyleGAN_Representations_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiang_3D-aware_Image_Generation_using_2D_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17905", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Neural_Collage_Transfer_Artistic_Reconstruction_via_Material_Manipulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2311.02202", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_Phasic_Content_Fusing_Diffusion_Model_with_Directional_Distribution_Consistency_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03729", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Single-Stage_Diffusion_NeRF_A_Unified_Approach_to_3D_Generation_and_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2304.06714", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gandikota_Erasing_Concepts_from_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.07345", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yuan_Make_Encoder_Great_Again_in_3D_GAN_Inversion_through_Geometry_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12326", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chang_HairNeRF_Geometry-Aware_Image_Synthesis_for_Hairstyle_Transfer_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/object-pose-estimation-and-tracking.json b/json_data/object-pose-estimation-and-tracking.json index 13c02da..f47ed92 100644 --- a/json_data/object-pose-estimation-and-tracking.json +++ b/json_data/object-pose-estimation-and-tracking.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_MixCycle_Mixup_Assisted_Semi-Supervised_3D_Single_Object_Tracking_with_Cycle_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09219", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Deep_Fusion_Transformer_Network_with_Weighted_Vector-Wise_Keypoints_Voting_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05438", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_IST-Net_Prior-Free_Category-Level_Pose_Estimation_with_Implicit_Space_Transformation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13479", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Adaptive_and_Background-Aware_Vision_Transformer_for_Real-Time_UAV_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_VI-Net_Boosting_Category-level_6D_Object_Pose_Estimation_via_Learning_Decoupled_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09916", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Tracking_by_Natural_Language_Specification_with_Long_Short-term_Context_Decoupling_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lian_CheckerPose_Progressive_Dense_Keypoint_Localization_for_Object_Pose_Estimation_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16874", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Deep_Active_Contours_for_Real-time_6-DoF_Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Learning_Symmetry-Aware_Geometry_Correspondences_for_6D_Object_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Query6DoF_Learning_Sparse_Queries_as_Implicit_Shape_Prior_for_Category-Level_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wan_SOCS_Semantically-Aware_Object_Coordinate_Space_for_Category-Level_6D_Object_Pose_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10346", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hai_Pseudo_Flow_Consistency_for_Self-Supervised_6D_Object_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10016", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Rozumnyi_Tracking_by_3D_Model_Estimation_of_Unknown_Objects_in_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06419", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_Algebraically_Rigorous_Quaternion_Framework_for_the_Neural_Network_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Linear-Covariance_Loss_for_End-to-End_Learning_of_6D_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11516", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pautrat_Vanishing_Point_Estimation_in_Uncalibrated_Images_with_Prior_Gravity_Direction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10694", diff --git a/json_data/photogrammetry-and-remote-sensing.json b/json_data/photogrammetry-and-remote-sensing.json index 97f8a71..71b7a7e 100644 --- a/json_data/photogrammetry-and-remote-sensing.json +++ b/json_data/photogrammetry-and-remote-sensing.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zorzi_RePolyWorld_-_A_Graph_Neural_Network_for_Polygonal_Scene_Parsing_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bastani_SatlasPretrain_A_Large-Scale_Dataset_for_Remote_Sensing_Image_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.15660", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Large-Scale_Land_Cover_Mapping_with_Fine-Grained_Classes_via_Class-Aware_Semi-Supervised_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Large_Selective_Kernel_Network_for_Remote_Sensing_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09030", @@ -110,6 +114,7 
@@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mendieta_Towards_Geospatial_Foundation_Models_via_Continual_Pretraining_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.04476", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Regularized_Primitive_Graph_Learning_for_Unified_Vector_Mapping_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.13963", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Class_Prior-Free_Positive-Unlabeled_Learning_with_Taylor_Variational_Loss_for_Hyperspectral_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.15081", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bernhard_MapFormer_Boosting_Change_Detection_by_Using_Pre-change_Information_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17859", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Deuser_Sample4Geo_Hard_Negative_Sampling_For_Cross-View_Geo-Localisation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11851", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_PanFlowNet_A_Flow-Based_Deep_Network_for_Pan-Sharpening_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.07774", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Seeing_Beyond_the_Patch_Scale-Adaptive_Semantic_Segmentation_of_High-resolution_Remote_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.15372", diff --git a/json_data/privacy-security-fairness-and-explainability.json b/json_data/privacy-security-fairness-and-explainability.json index 0332345..cd1bf70 100644 --- a/json_data/privacy-security-fairness-and-explainability.json +++ b/json_data/privacy-security-fairness-and-explainability.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Zolly_Zoom_Focal_Length_Correctly_for_Perspective-Distorted_Human_Mesh_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13796", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Park_ACLS_Adaptive_and_Conditional_Label_Smoothing_for_Network_Calibration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11911", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_PGFed_Personalize_Each_Clients_Global_Objective_for_Federated_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.01448", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Overwriting_Pretrained_Bias_with_Finetuning_Data_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06167", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_ITI-GEN_Inclusive_Text-to-Image_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05569", 
@@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hesse_FunnyBirds_A_Synthetic_Vision_Dataset_for_a_Part-Based_Analysis_of_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06248", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dai_X-VoE_Measuring_eXplanatory_Violation_of_Expectation_in_Physical_Events_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10441", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_Adaptive_Testing_of_Computer_Vision_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.02774", diff --git a/json_data/recognition-categorization.json b/json_data/recognition-categorization.json index 07a65d6..7632778 100644 --- a/json_data/recognition-categorization.json +++ b/json_data/recognition-categorization.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Cross_Contrasting_Feature_Perturbation_for_Domain_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12502", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_Flexible_Visual_Recognition_by_Evidential_Modeling_of_Confusion_and_Ignorance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.07403", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Abdelfattah_CDUL_CLIP-Driven_Unsupervised_Learning_for_Multi-Label_Image_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16634", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Noh_RankMixup_Ranking-Based_Mixup_Training_for_Network_Calibration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11990", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_Label-Noise_Learning_with_Intrinsically_Long-Tailed_Data_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.09833", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Parallel_Attention_Interaction_Network_for_Few-Shot_Skeleton-Based_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Rethinking_Mobile_Block_for_Efficient_Attention-based_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.01146", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Read-only_Prompt_Optimization_for_Vision-Language_Few-shot_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14960", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Understanding_Self-attention_Mechanism_via_Dynamical_System_Perspective_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2308.09939", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Learning_in_Imperfect_Environment_Multi-Label_Classification_with_Long-Tailed_Distribution_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.10539", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_What_do_neural_networks_learn_in_image_classification_A_frequency_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09829", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_Inducing_Neural_Collapse_to_a_Fixed_Hierarchy-Aware_Frame_for_Reducing_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05689", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Averly_Unified_Out-Of-Distribution_Detection_A_Model-Specific_Perspective_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06813", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jeon_A_Unified_Framework_for_Robustness_on_Diverse_Sampling_Errors_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Scene-Aware_Label_Graph_Learning_for_Multi-Label_Image_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xia_Holistic_Label_Correction_for_Noisy_Multi-Label_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_Strip-MLP_Efficient_Token_Interaction_for_Vision_MLP_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11458", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_EQ-Net_Elastic_Quantization_Neural_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07650", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shao_Data-free_Knowledge_Distillation_for_Fine-grained_Visual_Categorization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_Shift_from_Texture-bias_to_Shape-bias_Edge_Deformation-based_Augmentation_for_Robust_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Latent-OFER_Detect_Mask_and_Reconstruct_with_Latent_Vectors_for_Occluded_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11404", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_DR-Tune_Improving_Fine-tuning_of_Pretrained_Visual_Models_by_Distribution_Regularization_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2308.12058", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Park_Understanding_the_Feature_Norm_for_Out-of-Distribution_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.05316", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Du_Multi-View_Active_Fine-Grained_Visual_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.01153", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_DIFFGUARD_Semantic_Mismatch-Guided_Out-of-Distribution_Detection_Using_Pre-Trained_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07687", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Task-aware_Adaptive_Learning_for_Cross-domain_Few-shot_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Improving_Adversarial_Robustness_of_Masked_Autoencoders_via_Test-time_Frequency-domain_Prompting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10315", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Saliency_Regularization_for_Self-Training_with_Partial_Annotations_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Learning_Gabor_Texture_Features_for_Fine-Grained_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05396", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_UniFormerV2_Unlocking_the_Potential_of_Image_ViTs_for_Video_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.09552", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_RankMatch_Fostering_Confidence_and_Consistency_in_Learning_with_Noisy_Labels_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_MetaGCD_Learning_to_Continually_Learn_in_Generalized_Category_Discovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11063", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shen_FerKD_Surgical_Label_Adaptation_for_Efficient_Distillation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Point-Query_Quadtree_for_Crowd_Counting_Localization_and_More_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13814", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Park_Nearest_Neighbor_Guidance_for_Out-of-Distribution_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -885,6 +920,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Bayesian_Optimization_Meets_Self-Distillation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.12666", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_When_Prompt-based_Incremental_Learning_Does_Not_Meet_Strong_Pretraining_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10445", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hou_When_to_Learn_What_Model-Adaptive_Data_Augmentation_Curriculum_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04747", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chiaroni_Parametric_Information_Maximization_for_Generalized_Category_Discovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.00334", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xing_Boosting_Few-shot_Action_Recognition_with_Graph-guided_Hybrid_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09346", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Domain_Generalization_via_Rationale_Invariance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11158", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Masked_Spiking_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.01208", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_Prototype_Reminiscence_and_Augmented_Asymmetric_Knowledge_Aggregation_for_Non-Exemplar_Class-Incremental_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Distilled_Reverse_Attention_Network_for_Open-world_Compositional_Zero-Shot_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.00404", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_Candidate-aware_Selective_Disambiguation_Based_On_Normalized_Entropy_for_Instance-dependent_Partial-label_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_CLIPN_for_Zero-Shot_OOD_Detection_Teaching_CLIP_to_Say_No_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12213", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Self-similarity_Driven_Scale-invariant_Learning_for_Weakly_Supervised_Person_Search_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.12986", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Ahn_Sample-wise_Label_Confidence_Incorporation_for_Learning_with_Noisy_Labels_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xia_Combating_Noisy_Labels_with_Sample_Selection_by_Mining_High-Discrepancy_Examples_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Spatial-Aware_Token_for_Weakly_Supervised_Object_Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10438", diff --git a/json_data/recognition-detection.json b/json_data/recognition-detection.json index 741ea08..ac0329a 100644 --- a/json_data/recognition-detection.json +++ b/json_data/recognition-detection.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Random_Boxes_Are_Open-world_Object_Detectors_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08249", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fang_Unleashing_Vanilla_Vision_Transformer_with_Masked_Image_Modeling_for_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": "2204.02964", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xia_CoIn_Contrastive_Instance_Feature_Mining_for_Outdoor_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_A_Dynamic_Dual-Processing_Object_Detection_Framework_Inspired_by_the_Brains_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lv_Anchor-Intermediate_Detector_Decoupling_and_Coupling_Bounding_Boxes_for_Accurate_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.05666", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/McIntosh_Inter-Realization_Channels_Unsupervised_Anomaly_Detection_Beyond_One-Class_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Deep_Equilibrium_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09564", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_RecursiveDet_End-to-End_Region-Based_Recursive_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13619", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yuan_Small_Object_Detection_via_Coarse-to-fine_Proposal_Generation_and_Imitation_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09534", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Fu_ASAG_Building_Strong_One-Decoder-Layer_Sparse_Detectors_via_Adaptive_Sparse_Anchor_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09242", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mao_COCO-O_A_Benchmark_for_Object_Detectors_under_Natural_Distribution_Shifts_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12730", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Generative_Prompt_Model_for_Weakly_Supervised_Object_Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09756", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lao_UniKD_Universal_Knowledge_Distillation_for_Mimicking_Homogeneous_or_Heterogeneous_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bae_PNI__Industrial_Anomaly_Detection_using_Position_and_Neighborhood_Information_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.12634", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lao_Masked_Autoencoders_Are_Stronger_Knowledge_Distillers_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Li_GPA-3D_Geometry-aware_Prototype_Alignment_for_Unsupervised_Domain_Adaptive_3D_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08140", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiao_ADNet_Lane_Shape_Prediction_via_Anchor_Decomposition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10481", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Periodically_Exchange_Teacher-Student_for_Source-Free_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Towards_Fair_and_Comprehensive_Comparisons_for_Image-Based_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.05447", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Monocular_3D_Object_Detection_with_Bounding_Box_Denoising_in_3D_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01289", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Template-guided_Hierarchical_Feature_Restoration_for_Anomaly_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_ALWOD_Active_Learning_for_Weakly-Supervised_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2309.07914", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_ProtoFL_Unsupervised_Federated_Learning_via_Prototypical_Distillation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12450", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lei_Efficient_Adaptive_Human-Object_Interaction_Detection_with_Concept-guided_Memory_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03696", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Detection_Transformer_with_Stable_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.04742", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Distilling_DETR_with_Visual-Linguistic_Knowledge_for_Open-Vocabulary_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_Anomaly_Detection_Under_Distribution_Shift_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13845", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bhowmik_Detecting_Objects_with_Context-Likelihood_Graphs_and_Graph_Refinement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.12395", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Song_Unsupervised_Object_Localization_with_Representer_Point_Selection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04172", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_DETR_Does_Not_Need_Multi-Scale_or_Locality_Design_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.01904", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Su_Deep_Directly-Trained_Spiking_Neural_Networks_for_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11411", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Schinagl_GACE_Geometry_Aware_Confidence_Enhancement_for_Black-Box_3D_Object_Detectors_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.20319", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Teng_StageInteractor_Query-based_Object_Detector_with_Cross-stage_Interaction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.04978", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pu_Adaptive_Rotated_Convolution_for_Rotated_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.07820", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Decoupled_DETR_Spatially_Disentangling_Localization_and_Classification_for_Improved_End-to-End_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2310.15955", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Exploring_Transformers_for_Open-world_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04206", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_DDG-Net_Discriminability-Driven_Graph_Network_for_Weakly-supervised_Temporal_Action_Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16415", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Group_DETR_Fast_DETR_Training_with_Group-Wise_One-to-Many_Assignment_ICCV_2023_paper.pdf", "paper_arxiv_id": "2207.13085", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Category-aware_Allocation_Transformer_for_Weakly_Supervised_Object_Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_The_Devil_is_in_the_Crack_Orientation_A_New_Perspective_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pei_Clusterformer_Cluster-based_Transformer_for_3D_Object_Detection_in_Point_Clouds_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Less_is_More_Focus_Attention_for_Efficient_DETR_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12612", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_DFA3D_3D_Deformable_Attention_For_2D-to-3D_Feature_Lifting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12972", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Multi-Label_Self-Supervised_Learning_with_Scene_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03286", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_Cascade-DETR_Delving_into_High-Quality_Universal_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11035", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Representation_Disparity-aware_Distillation_for_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10308", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hashmi_FeatEnHancer_Enhancing_Hierarchical_Features_for_Object_Detection_and_Beyond_Under_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03594", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_DetZero_Rethinking_Offboard_3D_Object_Detection_with_Long-term_Sequential_Point_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2306.06023", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zong_DETRs_with_Collaborative_Hybrid_Assignments_Training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.12860", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Open-Vocabulary_Object_Detection_With_an_Open_Corpus_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Suri_SparseDet_Improving_Sparsely_Annotated_Object_Detection_with_Pseudo-positive_Mining_ICCV_2023_paper.pdf", "paper_arxiv_id": "2201.04620", @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Unsupervised_Surface_Anomaly_Detection_with_Diffusion_Probabilistic_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_UniTR_A_Unified_and_Efficient_Multi-Modal_Transformer_for_Birds-Eye-View_Representation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07732", @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yao_Focus_the_Discrepancy_Intra-_and_Inter-Correlation_Learning_for_Image_Anomaly_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.02983", @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_MonoNeRD_NeRF-like_Representations_for_Monocular_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09421", @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Integrally_Migrating_Pre-trained_Transformer_Encoder-decoders_for_Visual_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2205.09613", @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Generating_Dynamic_Kernels_via_Transformers_for_Lane_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Meta-ZSDETR_Zero-shot_DETR_with_Meta-learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09540", @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Spatial_Self-Distillation_for_Object_Detection_with_Inaccurate_Bounding_Boxes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12101", @@ -1485,6 +1544,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_AlignDet_Aligning_Pre-training_and_Fine-tuning_in_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11077", @@ -1510,6 +1570,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tu_MULLER_Multilayer_Laplacian_Resizer_for_Vision_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2304.02859", @@ -1535,6 +1596,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Unilaterally_Aggregated_Contrastive_Learning_with_Hierarchical_Augmentation_for_Anomaly_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10155", @@ -1560,6 +1622,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chang_DETRDistill_A_Universal_Knowledge_Distillation_Framework_for_DETR-families_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.10156", @@ -1585,6 +1648,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Delving_into_Motion-Aware_Matching_for_Monocular_3D_Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11607", @@ -1610,6 +1674,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_FB-BEV_BEV_Representation_from_Forward-Backward_View_Transformations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.02236", @@ -1635,6 +1700,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Learning_from_Noisy_Data_for_Semi-Supervised_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1660,6 +1726,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Boosting_Long-tailed_Object_Detection_via_Step-wise_Learning_on_Smooth-tail_Data_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.12833", @@ -1685,6 +1752,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, 
"demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Objects_Do_Not_Disappear_Video_Object_Detection_by_Single-Frame_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04770", @@ -1710,6 +1778,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Unified_Visual_Relationship_Detection_with_Vision_and_Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08998", @@ -1735,6 +1804,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Universal_Domain_Adaptation_via_Compressive_Attention_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.11862", @@ -1760,6 +1830,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Unsupervised_Domain_Adaptive_Detection_with_Network_Stability_Analysis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08182", @@ -1785,6 +1856,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tu_ImGeoNet_Image-induced_Geometry-aware_Voxel_Representation_for_Multi-view_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09098", @@ -1810,6 +1882,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yin_Cyclic-Bootstrap_Labeling_for_Weakly_Supervised_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05991", diff --git a/json_data/recognition-retrieval.json b/json_data/recognition-retrieval.json index 44905df..4c36d0a 100644 --- a/json_data/recognition-retrieval.json +++ b/json_data/recognition-retrieval.json @@ -10,6 +10,7 
@@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_Unsupervised_Feature_Representation_Learning_for_Domain-generalized_Cross-domain_Image_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Baranchuk_DEDRIFT_Robust_Similarity_Search_under_Content_Drift_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.02752", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shao_Global_Features_are_All_You_Need_for_Image_Retrieval_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06954", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_HSE_Hybrid_Species_Embedding_for_Deep_Metric_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zou_Discrepant_and_Multi-Instance_Proxies_for_Unsupervised_Person_Re-Identification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Towards_Grand_Unified_Representation_Learning_for_Unsupervised_Visible-Infrared_Person_Re-Identification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Berton_EigenPlaces_Training_Viewpoint_Robust_Models_for_Visual_Place_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10832", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_Simple_Baselines_for_Interactive_Video_Retrieval_with_Questions_and_Answers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10402", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Fan-Beam_Binarization_Difference_Projection_FB-BDP_A_Novel_Local_Object_Descriptor_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Song_Conditional_Cross_Attention_Network_for_Multi-Space_Embedding_without_Entanglement_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13254", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Learning_Concordant_Attention_via_Target-aware_Alignment_for_Visible-Infrared_Person_Re-identification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ahmad_Person_Re-Identification_without_Identification_via_Event_anonymization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04402", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Trivigno_DivideClassify_Fine-Grained_Classification_for_City-Wide_Visual_Geo-Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mohwald_Dark_Side_Augmentation_Generating_Diverse_Night_Examples_for_Metric_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16351", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guan_PIDRo_Parallel_Isomeric_Attention_with_Dynamic_Routing_for_Text-Video_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shao_Unified_Pre-Training_with_Pseudo_Texts_for_Text-To-Image_Person_Re-Identification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01420", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Modality_Unifying_Network_for_Visible-Infrared_Person_Re-Identification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.06262", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_DeepChange_A_Long-Term_Person_Re-Identification_Benchmark_with_Clothes_Change_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Luo_LexLIP_Lexicon-Bottlenecked_Language-Image_Pre-Training_for_Large-Scale_Image-Text_Sparse_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.02908", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_Dual_Pseudo-Labels_Interactive_Self-Training_for_Semi-Supervised_Visible-Infrared_Person_Re-Identification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_BT2_Backward-compatible_Training_with_Basis_Transformation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.03989", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Prototypical_Mixing_and_Retrieval-Based_Refinement_for_Label_Noise-Resistant_Image_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Learning_Spatial-context-aware_Global_Visual_Feature_Representation_for_Instance_Image_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Coarse-to-Fine_Learning_Compact_Discriminative_Representation_for_Single-Stage_Image_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04008", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Fang_Visible-Infrared_Person_Re-Identification_via_Semantic_Alignment_and_Affinity_Inference_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ni_Part-Aware_Transformer_for_Generalizable_Person_Re-identification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03322", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ypsilantis_Towards_Universal_Image_Embeddings_A_Large-Scale_Dataset_and_Challenge_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01858", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Dual_Learning_with_Dynamic_Knowledge_Distillation_for_Partially_Relevant_Video_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Fine-grained_Unsupervised_Domain_Adaptation_for_Gait_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pal_FashionNTM_Multi-turn_Fashion_Image_Retrieval_via_Cascaded_Memory_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10170", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guan_CrossLoc3D_Aerial-Ground_Cross-Source_3D_Place_Recognition_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2303.17778", diff --git a/json_data/recognition-segmentation-and-shape-analysis.json b/json_data/recognition-segmentation-and-shape-analysis.json index 42fad85..c30528b 100644 --- a/json_data/recognition-segmentation-and-shape-analysis.json +++ b/json_data/recognition-segmentation-and-shape-analysis.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kirillov_Segment_Anything_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02643", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chassat_Shape_Analysis_of_Euclidean_Curves_under_Frenet-Serret_Framework_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nandan_Unmasking_Anomalies_in_Road-Scene_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13316", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qi_High_Quality_Entity_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.05776", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Towards_Open-Vocabulary_Video_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01715", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_Beyond_One-to-One_Rethinking_the_Referring_Image_Segmentation_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2308.13853", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_Multiple_Instance_Learning_Framework_with_Masked_Hard_Instance_Mining_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15254", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Reed_Scale-MAE_A_Scale-Aware_Masked_Autoencoder_for_Multiscale_Geospatial_Representation_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.14532", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Progressive_Spatio-Temporal_Prototype_Matching_for_Text-Video_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_Towards_Deeply_Unified_Depth-aware_Panoptic_Segmentation_with_Bi-directional_Guidance_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14786", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_LogicSeg_Parsing_Visual_Semantics_with_Neural_Logic_Learning_and_Reasoning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.13556", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gupta_ASIC_Aligning_Sparse_in-the-wild_Image_Collections_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16201", diff --git a/json_data/representation-learning.json b/json_data/representation-learning.json 
index 4a23af9..339d9d7 100644 --- a/json_data/representation-learning.json +++ b/json_data/representation-learning.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_WDiscOOD_Out-of-Distribution_Detection_via_Whitened_Linear_Discriminant_Analysis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.07543", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wen_Pairwise_Similarity_Learning_is_SimPLE_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.09449", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_No_Fear_of_Classifier_Biases_Neural_Collapse_Inspired_Federated_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10058", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gu_Generalizable_Neural_Fields_as_Partially_Observed_Neural_Processes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.06660", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mentzer_M2T_Masking_Transformers_Twice_for_Faster_Decoding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.07313", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Psomas_Keep_It_SimPool_Who_Said_Supervised_Transformers_Suffer_from_Attention_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.06891", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + 
"kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Improving_Pixel-based_MIM_by_Reducing_Wasted_Modeling_Capability_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.00261", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Learning_Image-Adaptive_Codebooks_for_Class-Agnostic_Image_Restoration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.06513", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chavhan_Quality_Diversity_for_Visual_Pre-Training_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hou_Subclass-balancing_Contrastive_Learning_for_Long-tailed_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.15925", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sotiris_Mastering_Spatial_Graph_Prediction_of_Road_Networks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.00828", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/van_Spengler_Poincare_ResNet_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14027", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Exploring_Model_Transferability_through_the_Lens_of_Potential_Energy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.15074", @@ -335,6 +348,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Improving_CLIP_Fine-tuning_Performance_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ding_Unsupervised_Manifold_Linearizing_and_Clustering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.01805", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gurbuz_Generalized_Sum_Pooling_for_Metric_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09228", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Partition_Speeds_Up_Learning_Implicit_Neural_Representations_Based_on_Exponential-Increase_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.14184", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Singh_The_Effectiveness_of_MAE_Pre-Pretraining_for_Billion-Scale_Pretraining_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13496", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiao_Token-Label_Alignment_for_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.06455", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jain_Efficiently_Robustify_Pre-Trained_Models_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2309.07499", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xie_OFVL-MS_Once_for_Visual_Localization_across_Multiple_Indoor_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11928", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_Feature_Prediction_Diffusion_Model_for_Video_Anomaly_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Joint_Implicit_Neural_Representation_for_High-fidelity_and_Compact_Vector_Fonts_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_How_Far_Pre-trained_Models_Are_from_Neural_Collapse_on_the_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_OPERA_Omni-Supervised_Representation_Learning_with_Hierarchical_Supervisions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.05557", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ranasinghe_Perceptual_Grouping_in_Contrastive_Vision-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.09996", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Fully_Attentional_Networks_with_Self-emerging_Token_Labeling_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tian_Instance_and_Category_Supervision_are_Alternate_Learners_for_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_SkeletonMAE_Graph-based_Masked_Autoencoder_for_Skeleton_Sequence_Pre-training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08476", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_Motion-Guided_Masking_for_Spatiotemporal_Representation_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12962", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Data_Augmented_Flatness-aware_Gradient_Projection_for_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Take-A-Photo_3D-to-2D_Generative_Pre-training_of_Point_Cloud_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14971", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_BiViT_Extremely_Compressed_Binary_Vision_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.07091", @@ 
-835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sameni_Spatio-Temporal_Crop_Aggregation_for_Video_Representation_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.17042", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Hierarchical_Visual_Primitive_Experts_for_Compositional_Zero-Shot_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04016", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Quan_Semantic_Information_in_Contrastive_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bai_Cross-Domain_Product_Representation_Learning_for_Rich-Content_E-Commerce_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05550", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_Contrastive_Continuity_on_Augmentation_Stability_Rehearsal_for_Continual_Self-Supervised_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yucel_HybridAugment_Unified_Frequency_Spectra_Perturbations_for_Model_Robustness_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11823", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Unleashing_Text-to-Image_Diffusion_Models_for_Visual_Perception_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.02153", diff --git a/json_data/scene-analysis-and-understanding.json b/json_data/scene-analysis-and-understanding.json index 47cf444..05be50e 100644 --- a/json_data/scene-analysis-and-understanding.json +++ b/json_data/scene-analysis-and-understanding.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Generalized_Few-Shot_Point_Cloud_Segmentation_via_Geometric_Words_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11222", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_Boosting_3-DoF_Ground-to-Satellite_Camera_Localization_Accuracy_via_Geometry-Guided_Cross-View_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08015", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_EP2P-Loc_End-to-End_3D_Point_to_2D_Pixel_Localization_for_Large-Scale_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.07471", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Multi-task_View_Synthesis_with_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.17450", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Multi-Task_Learning_with_Knowledge_Distillation_for_Dense_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": 
null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Visually-Prompted_Language_Model_for_Fine-Grained_Scene_Graph_Generation_in_an_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13233", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xia_CMDA_Cross-Modality_Domain_Adaptation_for_Nighttime_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15942", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_VQA-GNN_Reasoning_with_Multimodal_Knowledge_via_Graph_Neural_Networks_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2205.11501", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Disentangle_then_Parse_Night-time_Semantic_Segmentation_with_Illumination_Disentanglement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09362", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_Visual_Traffic_Knowledge_Graph_Generation_from_Scene_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tu_Agglomerative_Transformer_for_Human-Object_Interaction_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08370", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_3D_Neural_Embedding_Likelihood_Probabilistic_Inverse_Graphics_for_Robust_6D_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.03744", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_HiLo_Exploiting_High_Low_Frequency_Relations_for_Unbiased_Panoptic_Scene_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15994", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yuan_RLIPv2_Fast_Scaling_of_Relational_Language-Image_Pre-Training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09351", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_UniSeg_A_Unified_Multi-Modal_LiDAR_Segmentation_Network_and_the_OpenPCSeg_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05573", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_See_More_and_Know_More_Zero-shot_Point_Cloud_Segmentation_via_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10782", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Compositional_Feature_Augmentation_for_Unbiased_Scene_Graph_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06712", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Patil_Multi-weather_Image_Restoration_via_Domain_Translation_ICCV_2023_paper.pdf", "paper_arxiv_id": 
null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Aberdam_CLIPTER_Looking_at_the_Bigger_Picture_in_Scene_Text_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.07464", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ganz_Towards_Models_that_Can_See_and_Read_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.07389", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_SurroundOcc_Multi-camera_3D_Occupancy_Prediction_for_Autonomous_Driving_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09551", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ji_DDP_Diffusion_Model_for_Dense_Visual_Prediction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17559", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qian_Understanding_3D_Object_Interaction_from_a_Single_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.09664", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_ObjectSDF_Improved_Object-Compositional_Neural_Implicit_Surfaces_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07868", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhong_Improving_Equivariance_in_State-of-the-Art_Supervised_Depth_and_Normal_Predictors_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16646", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yin_CrossMatch_Source-Free_Domain_Adaptive_Semantic_Segmentation_via_Cross-Modal_Consistency_Training_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_Semantic_Attention_Flow_Fields_for_Monocular_Dynamic_Scene_Decomposition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.01526", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_Holistic_Geometric_Feature_Learning_for_Structured_Reconstruction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.09622", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Scalable_Multi-Temporal_Remote_Sensing_Change_Data_Generation_via_Simulating_Stochastic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.17031", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_TaskExpert_Dynamically_Assembling_Multi-Task_Representations_with_Memorial_Mixture-of-Experts_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15324", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/He_Thinking_Image_Color_Aesthetics_Assessment_Models_Datasets_and_Benchmarks_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_STEERER_Resolving_Scale_Variations_for_Counting_and_Localization_via_Selective_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10468", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tonini_Object-aware_Gaze_Target_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09662", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Weakly_Supervised_Referring_Image_Segmentation_with_Intra-Chunk_and_Inter-Chunk_Consistency_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sudhakaran_Vision_Relation_Transformer_for_Unbiased_Scene_Graph_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09472", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_DDIT_Semantic_Scene_Completion_via_Deformable_Deep_Implicit_Templates_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_DQS3D_Densely-matched_Quantization-aware_Semi-supervised_3D_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2304.13031", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Shape_Anchor_Guided_Holistic_Indoor_Scene_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11133", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sarkar_SGAligner_3D_Scene_Alignment_with_Scene_Graphs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.14880", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Betrayed_by_Captions_Joint_Caption_Grounding_and_Generation_for_Open_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.00805", diff --git a/json_data/segmentation-grouping-and-shape-analysis.json b/json_data/segmentation-grouping-and-shape-analysis.json index c272b07..12e4d0c 100644 --- a/json_data/segmentation-grouping-and-shape-analysis.json +++ b/json_data/segmentation-grouping-and-shape-analysis.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Al_Khatib_3D_Instance_Segmentation_via_Enhanced_Spatial_and_Semantic_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Deng_Learning_Neural_Eigenfunctions_for_Unsupervised_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02841", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Divide_and_Conquer_3D_Point_Cloud_Instance_Segmentation_With_Point-Wise_ICCV_2023_paper.pdf", "paper_arxiv_id": "2207.11209", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Point2Mask_Point-supervised_Panoptic_Segmentation_via_Optimal_Transport_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.01779", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gholamian_Handwritten_and_Printed_Text_Segmentation_A_Signature_Case_Study_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07887", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Semantic-Aware_Implicit_Template_Learning_via_Part_Deformation_Consistency_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11916", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_LeaF_Learning_Frames_for_4D_Point_Cloud_Sequence_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jo_MARS_Model-agnostic_Biased_Object_Removal_without_Additional_Supervision_for_Weakly-Supervised_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.09913", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Peng_USAGE_A_Unified_Seed_Area_Generation_Paradigm_for_Weakly_Supervised_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.07806", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bekuzarov_XMem_Production-level_Video_Segmentation_From_Few_Annotated_Frames_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15958", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_SIGMA_Scale-Invariant_Global_Sparse_Shape_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08393", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Self-Calibrated_Cross_Attention_Network_for_Few-Shot_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09294", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Multi-granularity_Interaction_Simulation_for_Unsupervised_Interactive_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13399", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Texture_Learning_Domain_Randomization_for_Domain_Generalized_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11546", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Su_Unsupervised_Video_Object_Segmentation_with_Online_Adversarial_Self-Tuning_ICCV_2023_paper.pdf", 
"paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Exploring_Open-Vocabulary_Semantic_Segmentation_from_CLIP_Vision_Encoder_Distillation_Only_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.00450", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nayal_RbA_Segmenting_Unknown_Regions_Rejected_by_All_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.14293", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ravindran_SEMPART_Self-supervised_Multi-resolution_Partitioning_of_Image_Semantics_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.10972", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Safadoust_Multi-Object_Discovery_by_Low-Dimensional_Object_Motion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08027", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_MemorySeg_Online_LiDAR_Semantic_Segmentation_with_a_Latent_Memory_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Treating_Pseudo-labels_Generation_as_Image_Matting_for_Weakly_Supervised_Semantic_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_BoxSnake_Polygonal_Instance_Segmentation_with_Box_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11630", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_Dynamic_Token_Pruning_in_Plain_Vision_Transformers_for_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.01045", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Instance_Neural_Radiance_Field_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.04395", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_Global_Knowledge_Calibration_for_Fast_Open-Vocabulary_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09181", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Peng_Diffusion-based_Image_Translation_with_Label_Guidance_for_Domain_Adaptive_Semantic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12350", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": "https://gitee.com/mindspore/models", "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Boosting_Semantic_Segmentation_from_the_Perspective_of_Explicit_Class_Embeddings_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12894", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lamdouar_The_Making_and_Breaking_of_Camouflage_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2309.03899", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_CoinSeg_Contrast_Inter-_and_Intra-_Class_Representations_for_Incremental_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Few-Shot_Physically-Aware_Articulated_Mesh_Generation_via_Hierarchical_Deformation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10898", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_HAL3D_Hierarchical_Active_Learning_for_Fine-Grained_3D_Part_Labeling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.10460", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_FreeCOS_Self-Supervised_Learning_from_Fractals_and_Unlabeled_Images_for_Curvilinear_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07245", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_MasQCLIP_for_Open-Vocabulary_Universal_Image_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ying_CTVIS_Consistent_Training_for_Online_Video_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12616", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_A_Generalist_Framework_for_Panoptic_Segmentation_of_Images_and_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.06366", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Miao_Spectrum-guided_Multi-granularity_Referring_Video_Object_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13537", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Space_Engage_Collaborative_Space_Supervision_for_Contrastive-Based_Semi-Supervised_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09755", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Adaptive_Superpixel_for_Active_Learning_in_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16817", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mao_Multimodal_Variational_Auto-encoder_based_Audio-Visual_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yuan_Isomer_Isomerous_Transformer_for_Zero-shot_Video_Object_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06693", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_2D-3D_Interlaced_Transformer_for_Point_Cloud_Segmentation_with_Scene-Level_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dombrowski_Foreground-Background_Separation_through_Concept_Distillation_from_Generative_Image_Foundation_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.14306", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_SegPrompt_Boosting_Open-World_Segmentation_via_Category-Level_Prompt_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06531", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Monte_Carlo_Linear_Clustering_with_Single-Point_Supervision_is_Enough_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.04442", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_A_Simple_Framework_for_Open-Vocabulary_Segmentation_and_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08131", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/WU_Source-free_Depth_for_Object_Pop-out_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.05370", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Rana_DynaMITe_Dynamic_Query_Bootstrapping_for_Multi-object_Interactive_Segmentation_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06668", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Atmospheric_Transmission_and_Thermal_Inertia_Induced_Blind_Road_Segmentation_with_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Informative_Data_Mining_for_One-Shot_Cross-Domain_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14241", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Homography_Guided_Temporal_Fusion_for_Road_Line_and_Marking_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_Open-Vocabulary_Semantic_Segmentation_with_Decoupled_One-Pass_Network_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01198", @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_TCOVIS_Temporally_Consistent_Online_Video_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_FPR_False_Positive_Rectification_for_Weakly_Supervised_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zbinden_Stochastic_Segmentation_with_Conditional_Categorical_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08888", @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/BAAI/SegGPT", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_SegGPT_Towards_Segmenting_Everything_in_Context_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.03284", @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Open-vocabulary_Panoptic_Segmentation_with_Embedding_Modulation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11324", @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Residual_Pattern_Learning_for_Pixel-Wise_Out-of-Distribution_Detection_in_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.14512", @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Rewatbowornwong_Zero-guidance_Segmentation_Using_Zero_Segment_Labels_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13396", @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Model_Calibration_in_Dense_Classification_with_Adaptive_Label_Perturbation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13539", @@ -1485,6 +1544,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Enhanced_Soft_Label_for_Semi-Supervised_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1510,6 +1570,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cai_MixReorg_Cross-Modal_Mixed_Patch_Reorganization_is_a_Good_Mask_Learner_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04829", @@ -1535,6 +1596,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_DiffuMask_Synthesizing_Images_with_Pixel-level_Annotations_for_Semantic_Segmentation_Using_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11681", @@ -1560,6 +1622,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Alignment_Before_Aggregation_Trajectory_Memory_Retrieval_Network_for_Video_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1585,6 +1648,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Semi-Supervised_Semantic_Segmentation_under_Label_Noise_via_Diverse_Learning_Groups_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1610,6 +1674,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Simons_SUMMIT_Source-Free_Adaptation_of_Uni-Modal_Models_to_Multi-Modal_Targets_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11880", @@ -1635,6 +1700,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hsieh_Class-incremental_Continual_Learning_for_Instance_Segmentation_with_Image-level_Weak_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1660,6 +1726,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_Coarse-to-Fine_Amodal_Segmentation_with_Shape_Prior_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16825", @@ -1685,6 +1752,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_Rethinking_Amodal_Video_Segmentation_from_Learning_Supervised_Signals_with_Object-centric_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.13248", @@ -1710,6 +1778,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_DVIS_Decoupled_Video_Instance_Segmentation_Framework_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.03413", @@ -1735,6 +1804,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Takmaz_3D_Segmentation_of_Humans_in_Point_Clouds_with_Synthetic_Data_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.00786", @@ -1760,6 +1830,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lian_WaterMask_Instance_Segmentation_for_Underwater_Imagery_ICCV_2023_paper.pdf", 
"paper_arxiv_id": null, @@ -1785,6 +1856,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_Tracking_Anything_with_Decoupled_Video_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03903", diff --git a/json_data/self--semi--and-unsupervised-learning.json b/json_data/self--semi--and-unsupervised-learning.json index 70e78fb..9908294 100644 --- a/json_data/self--semi--and-unsupervised-learning.json +++ b/json_data/self--semi--and-unsupervised-learning.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Spencer_Kick_Back__Relax_Learning_to_Reconstruct_the_World_by_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10713", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Novel_Scenes__Classes_Towards_Adaptive_Open-set_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ganeshan_Improving_Unsupervised_Visual_Program_Inference_with_Code_Rewriting_Families_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.14972", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiang_Denoising_Diffusion_Autoencoders_are_Unified_Self-supervised_Learners_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09769", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Self-Ordering_Point_Clouds_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.00961", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Rambhatla_MOST_Multiple_Object_Localization_with_Self-Supervised_Transformers_for_Object_Discovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.05387", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_CHORUS__Learning_Canonicalized_3D_Human-Object_Spatial_Relations_from_Unbounded_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12288", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dou_Identity-Seeking_Self-Supervised_Representation_Learning_for_Generalizable_Person_Re-Identification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08887", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Anatomical_Invariance_Modeling_and_Semantic_Alignment_for_Self-supervised_Learning_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.05615", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_IOMatch_Simplifying_Open-Set_Semi-Supervised_Learning_with_Joint_Inliers_and_Outliers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13168", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Gui_Enhancing_Sample_Utilization_through_Sample_Adaptive_Augmentation_in_Semi-Supervised_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03598", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_When_Noisy_Labels_Meet_Long_Tail_Dilemmas_A_Representation_Calibration_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.10955", diff --git a/json_data/self--semi--meta--unsupervised-learning.json b/json_data/self--semi--meta--unsupervised-learning.json index c502986..dcadc52 100644 --- a/json_data/self--semi--meta--unsupervised-learning.json +++ b/json_data/self--semi--meta--unsupervised-learning.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Noise2Info_Noisy_Image_to_Information_of_Noise_for_Self-Supervised_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gomel_Box-based_Refinement_for_Weakly_Supervised_and_Unsupervised_Localization_Tasks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03874", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Diverse_Cotraining_Makes_Strong_Semi-Supervised_Segmentor_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09281", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_SSB_Simple_but_Strong_Baseline_for_Boosting_Performance_of_Open-Set_ICCV_2023_paper.pdf", "paper_arxiv_id": null, 
@@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yuan_Late_Stopping_Avoiding_Confidently_Learning_from_Mislabeled_Examples_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13862", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Ponder_Point_Cloud_Pre-training_via_Neural_Rendering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.00157", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Song_Semantics-Consistent_Feature_Search_for_Self-Supervised_Visual_Representation_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.06486", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Stable_and_Causal_Inference_for_Discriminative_Self-supervised_Deep_Visual_Representations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08321", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Duan_Towards_Semi-supervised_Learning_with_Non-random_Missing_Labels_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08872", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Hallucination_Improves_the_Performance_of_Unsupervised_Visual_Representation_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12168", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Georgescu_Audiovisual_Masked_Autoencoders_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.05922", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lai_PADCLIP_Pseudo-labeling_with_Adaptive_Debiasing_in_CLIP_for_Unsupervised_Domain_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_Removing_Anomalies_as_Noises_for_Industrial_Defect_Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_SparseMAE_Sparse_Training_Meets_Masked_Autoencoders_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Shrinking_Class_Space_for_Enhanced_Certainty_in_Semi-Supervised_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06777", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_Logic-induced_Diagnostic_Reasoning_for_Semi-supervised_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12595", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_GasMono_Geometry-Aided_Self-Supervised_Monocular_Depth_Estimation_for_Indoor_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16019", 
@@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Is_Imitation_All_You_Need_Generalized_Decision-Making_with_Dual-Phase_Training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07909", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Estepa_All4One_Symbiotic_Neighbour_Contrastive_Learning_via_Self-Attention_and_Redundancy_Reduction_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09417", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Weakly_Supervised_Learning_of_Semantic_Correspondence_through_Cascaded_Online_Correspondence_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Meng_Tracking_without_Label_Unsupervised_Multiple_Object_Tracking_via_Contrastive_Similarity_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.00942", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cabannes_Active_Self-Supervised_Learning_A_Few_Low-Cost_Relationships_Are_All_You_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15256", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Diffusion_Models_as_Masked_Autoencoders_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.03283", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Taraday_Enhanced_Meta_Label_Correction_for_Coping_with_Label_Corruption_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.12961", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Randomized_Quantization_A_Generic_Augmentation_for_Data_Agnostic_Self-supervised_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.08663", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tian_Prototypes-oriented_Transductive_Few-shot_Learning_with_Conditional_Transport_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03047", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhong_Contrastive_Learning_Relies_More_on_Spatial_Inductive_Bias_Than_Supervised_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_Pseudo-label_Alignment_for_Semi-supervised_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05359", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_CFCG_Semi-Supervised_Semantic_Segmentation_via_Cross-Fusion_and_Contour_Guidance_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Pixel-Wise_Contrastive_Distillation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.00218", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Rethinking_Safe_Semi-supervised_Learning_Transferring_the_Open-set_Problem_to_A_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Towards_Open-Set_Test-Time_Adaptation_Utilizing_the_Wisdom_of_Crowds_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06879", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Gradient-based_Sampling_for_Class_Imbalanced_Semi-supervised_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gu_Remembering_Normality_Memory-guided_Knowledge_Distillation_for_Unsupervised_Anomaly_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Du_Semi-Supervised_Learning_via_Weight-Aware_Distillation_under_Class_Distribution_Mismatch_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11874", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Park_Label_Shift_Adapter_for_Test-Time_Adaptation_under_Covariate_and_Label_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2308.08810", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_SimMatchV2_Semi-Supervised_Learning_with_Graph_Consistency_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06692", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Unsupervised_Accuracy_Estimation_of_Deep_Visual_Models_using_Domain-Adaptive_Adversarial_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10062", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shvetsova_Learning_by_Sorting_Self-supervised_Learning_with_Group_Ordering_Constraints_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.02009", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Rehman_L-DAWA_Layer-wise_Divergence_Aware_Weight_Aggregation_in_Federated_Self-Supervised_Visual_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07393", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gu_Class-relation_Knowledge_Distillation_for_Novel_Class_Discovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09158", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nakamura_Representation_Uncertainty_in_Self-Supervised_Learning_as_Variational_Inference_ICCV_2023_paper.pdf", "paper_arxiv_id": "2203.11437", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": 
null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hatem_Point-TTA_Test-Time_Adaptation_for_Point_Cloud_Registration_Using_Multitask_Meta-Auxiliary_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16481", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lebailly_Adaptive_Similarity_Bootstrapping_for_Self-Distillation_Based_Representation_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13606", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sheng_Point_Contrastive_Prediction_with_Semantic_Clustering_for_Self-Supervised_Learning_on_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09247", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_MHCN_A_Hyperbolic_Neural_Network_Model_for_Multi-view_Hierarchical_Clustering_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Salehi_Time_Does_Tell_Self-Supervised_Time-Tuning_of_Dense_Image_Representations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11796", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Colomer_To_Adapt_or_Not_to_Adapt_Real-Time_Adaptation_for_Semantic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15063", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Noh_Simple_and_Effective_Out-of-Distribution_Detection_via_Cosine-based_Softmax_Loss_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Asanomi_MixBag_Bag-Level_Data_Augmentation_for_Learning_from_Label_Proportions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08822", @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shen_Masked_Spatio-Temporal_Structure_Prediction_for_Self-supervised_Learning_on_Point_Cloud_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09245", @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wen_Parametric_Classification_for_Generalized_Category_Discovery_A_Baseline_Study_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.11727", @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Object-Centric_Multiple_Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.00233", @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fang_Locating_Noise_is_Halfway_Denoising_for_Semi-Supervised_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Learning_Semi-supervised_Gaussian_Mixture_Models_for_Generalized_Category_Discovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.06144", @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kloepfer_LoCUS_Learning_Multiscale_3D-consistent_Features_from_Posed_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qian_Stable_Cluster_Discrimination_for_Deep_Clustering_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Long_Cross-modal_Scalable_Hierarchical_Clustering_in_Hyperbolic_space_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Collaborative_Propagation_on_Multiple_Instance_Graphs_for_3D_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.05110", @@ -1485,6 +1544,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qian_Semantics_Meets_Temporal_Correspondence_Self-supervised_Object-centric_Learning_in_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09951", @@ -1510,6 +1570,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Proxy_Anchor-based_Unsupervised_Learning_for_Continuous_Generalized_Category_Discovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10943", @@ -1535,6 +1596,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_DreamTeacher_Pretraining_Image_Backbones_with_Deep_Generative_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07487", @@ -1560,6 +1622,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mirza_MATE_Masked_Autoencoders_are_Online_3D_Test-Time_Learners_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.11432", @@ -1585,6 +1648,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_PADDLES_Phase-Amplitude_Spectrum_Disentangled_Early_Stopping_for_Learning_with_Noisy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.03462", @@ -1610,6 +1674,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/LI_Calibrating_Uncertainty_for_Semi-Supervised_Crowd_Counting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09887", @@ -1635,6 +1700,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Roy_Test_Time_Adaptation_for_Blind_Image_Quality_Assessment_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14735", @@ -1660,6 +1726,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Deep_Multiview_Clustering_by_Contrasting_Cluster_Assignments_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2304.10769", diff --git a/json_data/transfer-low-shot-and-continual-learning.json b/json_data/transfer-low-shot-and-continual-learning.json index 28d0575..000f57e 100644 --- a/json_data/transfer-low-shot-and-continual-learning.json +++ b/json_data/transfer-low-shot-and-continual-learning.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_Frequency_Guidance_Matters_in_Few-Shot_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_Sensitivity-Aware_Visual_Parameter-Efficient_Fine-Tuning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08566", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_On_the_Robustness_of_Open-World_Test-Time_Training_Self-Training_with_Dynamic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09942", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jung_Generating_Instance-level_Prompts_for_Rehearsal-free_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zang_Boosting_Novel_Category_Discovery_Over_Domains_with_Soft_Contrastive_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.11262", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Kang_A_Soft_Nearest-Neighbor_Framework_for_Continual_Semi-Supervised_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.05102", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_GraphEcho_Graph-Driven_Unsupervised_Domain_Adaptation_for_Echocardiogram_Video_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11145", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Suris_ViperGPT_Visual_Inference_via_Python_Execution_for_Reasoning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08128", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Improved_Visual_Fine-tuning_with_Natural_Language_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01489", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_Preparing_the_Future_for_Continual_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_MAP_Towards_Balanced_Generalization_of_IID_and_OOD_through_Model-Agnostic_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pei_Space-time_Prompting_for_Video_Class-incremental_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff 
--git a/json_data/transfer-low-shot-continual-long-tail-learning.json b/json_data/transfer-low-shot-continual-long-tail-learning.json index 16ef477..2029583 100644 --- a/json_data/transfer-low-shot-continual-long-tail-learning.json +++ b/json_data/transfer-low-shot-continual-long-tail-learning.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_ImbSAM_A_Closer_Look_at_Sharpness-Aware_Minimization_in_Class-Imbalanced_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07815", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Seo_LFS-GAN_Lifelong_Few-Shot_Image_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11917", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Augmented_Box_Replay_Overcoming_Foreground_Shift_for_Incremental_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12427", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bruggemann_Contrastive_Model_Adaptation_for_Cross-Condition_Robustness_in_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05194", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Towards_Effective_Instance_Discrimination_Contrastive_Loss_for_Unsupervised_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2202.02802", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_Adversarial_Bayesian_Augmentation_for_Single-Source_Domain_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09520", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lyu_Measuring_Asymmetric_Gradient_Discrepancy_in_Parallel_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_CSDA_Learning_Category-Scale_Joint_Feature_for_Domain_Adaptive_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Borup_Distilling_from_Similar_Tasks_for_Transfer_Learning_on_a_Budget_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.12314", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cho_Complementary_Domain_Adaptation_and_Generalization_for_Unsupervised_Continual_Domain_Shift_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15833", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Camera-Driven_Representation_Learning_for_Unsupervised_Domain_Adaptive_Person_Re-identification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11901", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Khan_Introducing_Language_Guidance_in_Prompt-based_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.15827", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Fast_and_Accurate_Transferability_Measurement_by_Evaluating_Intra-class_Feature_Variance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05986", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_A_Unified_Continual_Learning_Framework_with_General_Parameter-Efficient_Tuning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10070", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dinsdale_SFHarmony_Source_Free_Domain_Adaptation_for_Distributed_Neuroimaging_Analysis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15965", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chavan_Towards_Realistic_Evaluation_of_Industrial_Continual_Learning_Scenarios_with_an_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_CDAC_Cross-domain_Attention_Consistency_in_Transformer_for_Domain_Adaptive_Semantic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.14703", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Park_PC-Adapter_Topology-Aware_Adapter_for_Efficient_Domain_Adaption_on_Point_Clouds_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.16936", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_DETA_Denoised_Task_Adaptation_for_Few-Shot_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06315", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Activate_and_Reject_Towards_Safe_Domain_Generalization_under_Category_Shift_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Generalizable_Decision_Boundaries_Dualistic_Meta-Learning_for_Open_Set_Domain_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09391", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Continual_Zero-Shot_Learning_through_Semantically_Guided_Generative_Random_Walks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12366", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Zero-Shot_Point_Cloud_Segmentation_by_Semantic-Visual_Aware_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_MDCS_More_Diverse_Experts_with_Consistency_Self-distillation_for_Long-tailed_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09922", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/B_Building_a_Winning_Team_Selecting_Source_Model_Ensembles_using_a_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02429", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiong_Confidence-based_Visual_Dispersal_for_Few-shot_Unsupervised_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.15575", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_BEV-DG_Cross-Modal_Learning_under_Birds-Eye_View_for_Domain_Generalization_of_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06530", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Samarasinghe_CDFSL-V_Cross-Domain_Few-Shot_Learning_for_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03989", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Herath_Energy-based_Self-Training_and_Normalization_for_Unsupervised_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Regularized_Mask_Tuning_Uncovering_Hidden_Knowledge_in_Pre-Trained_Vision-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15049", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Malepathirana_NAPA-VQ_Neighborhood-Aware_Prototype_Augmentation_with_Vector_Quantization_for_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09297", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_A_Sentence_Speaks_a_Thousand_Images_Domain_Generalization_through_Distilling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.12530", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_ViM_Vision_Middleware_for_Unified_Downstream_Transferring_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06911", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Singh_Learning_to_Learn_How_to_Continuously_Teach_Humans_and_Machines_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.15470", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_A_Good_Student_is_Cooperative_and_Reliable_CNN-Transformer_Collaborative_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12574", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Moon_Online_Class_Incremental_Learning_on_Stochastic_Blurry_Task_Boundary_via_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09303", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Heterogeneous_Forgetting_Compensation_for_Class-Incremental_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03374", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Koh_Disposable_Transfer_Learning_for_Selective_Source_Task_Unlearning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09971", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lee_Online_Continual_Learning_on_Hierarchical_Label_Expansion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14374", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Black-Box_Unsupervised_Domain_Adaptation_with_Bi-Directional_Atkinson-Shiffrin_Memory_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13236", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tao_Local_and_Global_Logit_Adjustments_for_Long-Tailed_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bulat_FS-DETR_Few-Shot_DEtection_TRansformer_with_Prompting_and_without_Re-Training_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2210.04845", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_Tuning_Pre-trained_Model_via_Moment_Probing_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11342", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Singh_Benchmarking_Low-Shot_Robustness_to_Natural_Distribution_Shifts_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.11263", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Label-Guided_Knowledge_Distillation_for_Continual_Semantic_Segmentation_on_2D_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gholami_ETran_Energy-Based_Transferability_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.02027", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fahes_PODA_Prompt-driven_Zero-shot_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.03241", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Local_Context-Aware_Active_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.12856", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_MRN_Multiplexed_Routing_Network_for_Incremental_Multilingual_Text_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.14758", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Few-Shot_Dataset_Distillation_via_Translative_Pre-Training_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_Wasserstein_Expansible_Variational_Autoencoder_for_Discriminative_and_Generative_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Tangent_Model_Composition_for_Ensembling_and_Continual_Fine-tuning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08114", @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Look_at_the_Neighbor_Distortion-aware_Unsupervised_Domain_Adaptation_for_Panoramic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.05493", @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhou_Homeomorphism_Alignment_for_Unsupervised_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Dong_Knowledge_Restore_and_Transfer_for_Multi-Label_Class-Incremental_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.13334", @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jian_Unsupervised_Domain_Adaptation_for_Training_Event-Based_Networks_Using_Contrastive_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12424", @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cetin_A_Simple_Recipe_to_Meta-Learn_Forward_and_Backward_Transfer_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Dynamic_Residual_Classifier_for_Class_Incremental_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.13305", @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Concept-wise_Fine-tuning_Matters_in_Preventing_Negative_Transfer_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1485,6 +1544,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Online_Prototype_Learning_for_Online_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.00301", @@ -1510,6 +1570,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_Bidirectional_Alignment_for_Domain_Adaptive_Detection_with_Transformers_ICCV_2023_paper.pdf", "paper_arxiv_id": 
null, @@ -1535,6 +1596,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Borrowing_Knowledge_From_Pre-trained_Language_Model_A_New_Data-efficient_Visual_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1560,6 +1622,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ge_CLR_Channel-wise_Lightweight_Reprogramming_for_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11386", @@ -1585,6 +1648,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_Multi-Modal_Continual_Test-Time_Adaptation_for_3D_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10457", @@ -1610,6 +1674,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Panos_First_Session_Adaptation_A_Strong_Replay-Free_Baseline_for_Class-Incremental_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13199", @@ -1635,6 +1700,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pal_Domain_Adaptive_Few-Shot_Open-Set_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.12814", @@ -1660,6 +1726,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Rethinking_the_Role_of_Pre-Trained_Networks_in_Source-Free_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.07585", @@ -1685,6 +1752,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Al_Kader_Hammoud_Rapid_Adaptation_in_Online_Continual_Learning_Are_We_Evaluating_It_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.09275", @@ -1710,6 +1778,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Multi-grained_Temporal_Prototype_Learning_for_Few-shot_Video_Object_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11160", @@ -1735,6 +1804,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Dukic_A_Low-Shot_Object_Counting_Network_With_Iterative_Prototype_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.08217", @@ -1760,6 +1830,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gao_Towards_Better_Robustness_against_Common_Corruptions_for_Unsupervised_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1785,6 +1856,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kang_Alleviating_Catastrophic_Forgetting_of_Incremental_Object_Detection_via_Within-Class_and_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1810,6 +1882,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hao_Class-Aware_Patch_Embedding_Adaptation_for_Few-Shot_Image_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1835,6 +1908,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Jing_Order-preserving_Consistency_Regularization_for_Domain_Adaptation_and_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.13258", @@ -1860,6 +1934,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sanyal_Domain-Specificity_Inducing_Transformers_for_Source-Free_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14023", @@ -1885,6 +1960,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Diffusion_Model_as_Representation_Learner_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10916", @@ -1910,6 +1986,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Du_s-Adaptive_Decoupled_Prototype_for_Few-Shot_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1935,6 +2012,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jin_Growing_a_Brain_with_Sparsity-Inducing_Generation_for_Continual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1960,6 +2038,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_DomainAdaptor_A_Novel_Approach_to_Test-time_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10297", @@ -1985,6 +2064,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Reconciling_Object-Level_and_Global-Level_Objectives_for_Long-Tail_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, 
@@ -2010,6 +2090,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Domain_Generalization_via_Balancing_Training_Difficulty_and_Model_Capability_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.00844", @@ -2035,6 +2116,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hemati_Understanding_Hessian_Alignment_for_Domain_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11778", @@ -2060,6 +2142,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bhattacharjee_Vision_Transformer_Adapters_for_Generalizable_Multitask_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12372", @@ -2085,6 +2168,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huo_Focus_on_Your_Target_A_Dual_Teacher-Student_Framework_for_Domain-Adaptive_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09083", @@ -2110,6 +2194,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Masked_Retraining_Teacher-Student_Framework_for_Domain_Adaptive_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2135,6 +2220,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_DandelionNet_Domain_Composition_with_Instance_Adaptive_Classification_for_Domain_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2160,6 +2246,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jung_CAFA_Class-Aware_Feature_Alignment_for_Test-Time_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.00205", @@ -2185,6 +2272,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Christensen_Image-Free_Classifier_Injection_for_Zero-Shot_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10599", @@ -2210,6 +2298,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_CBA_Improving_Online_Continual_Learning_via_Continual_Bias_Adaptor_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06925", @@ -2235,6 +2324,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhai_Masked_Autoencoders_are_Efficient_Class_Incremental_Learners_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12510", @@ -2260,6 +2350,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_DomainDrop_Suppressing_Domain-Sensitive_Channels_for_Domain_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10285", @@ -2285,6 +2376,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zheng_Preventing_Zero-Shot_Transfer_Degradation_in_Continual_Learning_of_Vision-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06628", @@ -2310,6 +2402,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Incremental_Generalized_Category_Discovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.14310", @@ -2335,6 +2428,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_SLCA_Slow_Learner_with_Classifier_Alignment_for_Continual_Learning_on_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05118", @@ -2360,6 +2454,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Efficient_Model_Personalization_in_Federated_Learning_via_Client-Specific_Prompt_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.15367", @@ -2385,6 +2480,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_iDAG_Invariant_DAG_Searching_for_Domain_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2410,6 +2506,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ahmed_SSDA_Secure_Source-Free_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2435,6 +2532,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Learning_Pseudo-Relations_for_Cross-domain_Semantic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2460,6 +2558,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Self-Organizing_Pathway_Expansion_for_Non-Exemplar_Class-Incremental_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2485,6 +2584,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ngo_Improved_Knowledge_Transfer_for_Semi-Supervised_Domain_Adaptation_via_Trico_Training_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2510,6 +2610,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gu_Few-shot_Continual_Infomax_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2535,6 +2636,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Saha_EDAPS_Enhanced_Domain-Adaptive_Panoptic_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.14291", @@ -2560,6 +2662,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Label-Efficient_Online_Continual_Object_Detection_in_Streaming_Video_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.00309", @@ -2585,6 +2688,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Prototypical_Kernel_Learning_and_Open-set_Foreground_Perception_for_Generalized_Few-shot_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04952", @@ -2610,6 +2714,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Moon_MSI_Maximize_Support-Set_Information_for_Few-Shot_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.04673", @@ -2635,6 +2740,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_AREA_Adaptive_Reweighting_via_Effective_Area_for_Long-Tailed_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2660,6 +2766,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chattopadhyay_PASTA_Proportional_Amplitude_Spectrum_Training_Augmentation_for_Syn-to-Real_Domain_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.00979", @@ -2685,6 +2792,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xia_Personalized_Semantics_Excitation_for_Federated_Image_Classification_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2710,6 +2818,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xia_Few-Shot_Video_Classification_via_Representation_Fusion_and_Promotion_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2735,6 +2844,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gasperini_Segmenting_Known_Objects_and_Unseen_Unknowns_without_Prior_Knowledge_ICCV_2023_paper.pdf", "paper_arxiv_id": "2209.05407", diff --git a/json_data/video-analysis-and-understanding.json b/json_data/video-analysis-and-understanding.json index 2c35a7d..d817b46 100644 --- a/json_data/video-analysis-and-understanding.json +++ b/json_data/video-analysis-and-understanding.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Argaw_Long-range_Multimodal_Pretraining_for_Movie_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": 
"2308.09775", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Cross-view_Semantic_Alignment_for_Livestreaming_Product_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04912", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_HTML_Hybrid_Temporal-scale_Multimodal_Learning_Framework_for_Referring_Video_Object_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_DyGait_Exploiting_Dynamic_Representations_for_High-performance_Gait_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14953", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Deng_Identity-Consistent_Aggregation_for_Video_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07737", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_Augmenting_and_Aligning_Snippets_for_Few-Shot_Video_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.10451", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shao_Action_Sensitivity_Learning_for_Temporal_Action_Localization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.15701", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf":
"https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_SwinLSTM_Improving_Spatiotemporal_Prediction_Accuracy_using_Swin_Transformer_and_LSTM_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09891", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hong_LVOS_A_Benchmark_for_Long-term_Video_Object_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.10181", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_MGMAE_Motion_Guided_Masking_for_Video_Masked_Autoencoding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10794", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Aziere_Markov_Game_Video_Augmentation_for_Action_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ladune_COOL-CHIC_Coordinate-based_Low_Complexity_Hierarchical_Image_Codec_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bulat_ReGen_A_good_Generative_Zero-Shot_Video_Classifier_Should_be_Rewarded_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ali_Task_Agnostic_Restoration_of_Natural_Video_Dynamics_ICCV_2023_paper.pdf", "paper_arxiv_id": "2206.03753", @@ -360,6 +374,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hirschorn_Normalizing_Flows_for_Human_Pose_Anomaly_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.10946", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Movement_Enhancement_toward_Multi-Scale_Video_Feature_Representation_for_Temporal_Action_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Event-Guided_Procedure_Planning_from_Instructional_Videos_with_Text_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08885", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yoon_SCANet_Scene_Complexity_Aware_Network_for_Weakly-Supervised_Video_Moment_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.05241", @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Spatio-temporal_Prompting_Network_for_Robust_Video_Feature_Extraction_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fioresi_TeD-SPAD_Temporal_Distinctiveness_for_Self-Supervised_Privacy-Preservation_for_Video_Anomaly_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11072", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Tian_Non-Semantics_Suppressed_Mask_Learning_for_Unsupervised_Video_Semantic_Compression_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_UnLoc_A_Unified_Framework_for_Video_Localization_Tasks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11062", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Duan_SkeleTR_Towards_Skeleton-based_Action_Recognition_in_the_Wild_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_AutoAD_II_The_Sequel_-_Who_When_and_What_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.06838", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Plizzari_What_Can_a_Cook_in_Italy_Teach_a_Mechanic_in_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.08713", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Barrios_Localizing_Moments_in_Long_Video_Via_Multimodal_Guidance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.13372", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_LAC_-_Latent_Action_Composition_for_Skeleton-based_Action_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.14500", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": 
null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_RIGID_Recurrent_GAN_Inversion_and_Editing_of_Real_Face_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06097", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bao_Uncertainty-aware_State_Space_Transformer_for_Egocentric_3D_Hand_Trajectory_Forecasting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08243", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_What_Can_Simple_Arithmetic_Operations_Do_for_Temporal_Modeling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08908", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fang_UATVR_Uncertainty-Adaptive_Text-Video_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.06309", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_D3G_Exploring_Gaussian_Prior_for_Temporal_Sentence_Grounding_with_Glance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04197", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_Unsupervised_Open-Vocabulary_Object_Localization_in_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.09858", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Shao_HiVLP_Hierarchical_Interactive_Video-Language_Pre-Training_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pan_Scanning_Only_Once_An_End-to-end_Framework_for_Fast_Temporal_Grounding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08345", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wasim_Video-FocalNets_Spatio-Temporal_Focal_Modulation_for_Video_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Djilali_Lip2Vec_Efficient_and_Robust_Visual_Speech_Recognition_via_Latent-to-Latent_Visual_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06112", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Heigold_Video_OWL-ViT_Temporally-consistent_Open-world_Localization_in_Video_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Thoker_Tubelet-Contrastive_Self-Supervision_for_Video-Efficient_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11003", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Memory-and-Anticipation_Transformer_for_Online_Action_Understanding_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2308.07893", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_Video_Action_Segmentation_via_Contextually_Refined_Temporal_Keypoints_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jang_Knowing_Where_to_Focus_Event-aware_Transformer_for_Video_Grounding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06947", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_MPI-Flow_Learning_Realistic_Optical_Flow_with_Multiplane_Images_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.06714", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Discovering_Spatio-Temporal_Rationales_for_Video_Question_Answering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12058", @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Scalable_Video_Object_Segmentation_with_Simplified_Framework_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09903", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Root_Pose_Decomposition_Towards_Generic_Non-rigid_3D_Reconstruction_with_Monocular_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10089", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Helping_Hands_An_Object-Aware_Ego-Centric_Video_Recognition_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07918", @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Modeling_the_Relative_Visual_Tempo_for_Self-supervised_Skeleton-based_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Tube-Link_A_Flexible_Cross_Tube_Framework_for_Universal_Video_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12782", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qing_Disentangling_Spatial_and_Temporal_Learning_for_Efficient_Image-to-Video_Transfer_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.07911", @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Tem-Adapter_Adapting_Image-Text_Pretraining_for_Video_Question_Answer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08414", diff --git a/json_data/vision-and-audio.json b/json_data/vision-and-audio.json index 9121c27..0ef0105 100644 --- a/json_data/vision-and-audio.json +++ b/json_data/vision-and-audio.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Senocak_Sound_Source_Localization_is_All_about_Cross-Modal_Alignment_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.10724", @@ -35,6 +36,7 @@ "modelscope": null, 
"gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mo_Class-Incremental_Grouping_Network_for_Continual_Audio-Visual_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05281", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pian_Audio-Visual_Class-Incremental_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11073", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Choi_DiffV2S_Diffusion-Based_Video-to-Speech_Synthesis_with_Vision-Guided_Speaker_Embedding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07787", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jeong_The_Power_of_Sound_TPoS_Audio_Reactive_Video_Generation_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.04509", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Muaz_SIDGAN_High-Resolution_Dubbed_Video_Generation_via_Shift-Invariant_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Niu_On_the_Audio-visual_Synchronization_for_Lip-to-Speech_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.00502", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Be_Everywhere_-_Hear_Everything_BEE_Audio_Scene_Reconstruction_by_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yun_Dense_2D-3D_Indoor_Prediction_with_Sound_via_Aligned_Cross-Modal_Distillation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.11081", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hong_Hyperbolic_Audio-visual_Zero-shot_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12558", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chowdhury_AdVerb_Visually_Guided_Audio_Dereverberation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12370", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Sound_Localization_from_Motion_Jointly_Learning_Sound_Direction_and_Camera_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11329", diff --git a/json_data/vision-and-graphics.json b/json_data/vision-and-graphics.json index e428efe..48d3c73 100644 --- a/json_data/vision-and-graphics.json +++ b/json_data/vision-and-graphics.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mercier_Efficient_Neural_Supersampling_on_a_Novel_Gaming_Dataset_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.01483", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Pang_Locally_Stylized_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.10684", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_NEMTO_Neural_Environment_Matting_for_Novel_View_and_Relighting_Synthesis_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.11963", @@ -85,6 +88,7 @@ "modelscope": "https://www.modelscope.cn/models/damo/cv_ddcolor_image-colorization/summary", "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kang_DDColor_Towards_Photo-Realistic_Image_Colorization_via_Dual_Decoders_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.11613", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_IntrinsicNeRF_Learning_Intrinsic_Neural_Radiance_Fields_for_Editable_Novel_View_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.00647", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_PARIS_Part-level_Reconstruction_and_Motion_Analysis_for_Articulated_Objects_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07391", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_ReMoDiffuse_Retrieval-Augmented_Motion_Diffusion_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01116", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/spaces/tmaham/DS-Fusion-Express", "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Tanveer_DS-Fusion_Artistic_Typography_via_Discriminated_and_Stylized_Diffusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09604", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qiao_Dynamic_Mesh-Aware_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Neural_Reconstruction_of_Relightable_Human_Model_from_Monocular_Video_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mai_Neural_Microfacet_Fields_for_Inverse_Rendering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.17806", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mehta_A_Theory_of_Topological_Derivatives_for_Inverse_Rendering_of_Geometry_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09865", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sella_Vox-E_Text-Guided_Voxel_Editing_of_3D_Objects_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.12048", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_StegaNeRF_Embedding_Invisible_Information_within_Neural_Radiance_Fields_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.01602", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, 
"zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_GlobalMapper_Arbitrary-Shaped_Urban_Layout_Generation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09693", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lu_Urban_Radiance_Field_Representation_with_Deformable_Neural_Mesh_Primitives_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.10776", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Roessle_End2End_Multi-View_Feature_Matching_with_Differentiable_Pose_Optimization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2205.01694", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Geng_Tree-Structured_Shading_Decomposition_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Piche-Meunier_Lens_Parameter_Estimation_for_Realistic_Depth_of_Field_Modeling_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhong_AttT2M_Text-Driven_Human_Motion_Generation_with_Multi-Perspective_Attention_Mechanism_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.00796", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/de_Guevara_Cross-modal_Latent_Space_Alignment_for_Image_to_Avatar_Translation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Computationally-Efficient_Neural_Image_Compression_with_Shallow_Decoders_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06244", diff --git a/json_data/vision-and-language.json b/json_data/vision-and-language.json index 769600c..f75f133 100644 --- a/json_data/vision-and-language.json +++ b/json_data/vision-and-language.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_SMAUG_Sparse_Masked_Autoencoder_for_Efficient_Video-Language_Pre-Training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.11446", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jin_DiffusionRet_Generative_Text-Video_Retrieval_with_Diffusion_Model_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09867", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_Explore_and_Tell_Embodied_Visual_Captioning_in_3D_Environments_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10447", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Distilling_Large_Vision-Language_Model_with_Out-of-Distribution_Generalizability_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.03135", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, 
"demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Learning_Trajectory-Word_Alignments_for_Video-Language_Tasks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.01953", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xue_Variational_Causal_Inference_Network_for_Explanatory_Visual_Question_Answering_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye-Bin_TextManiA_Enriching_Visual_Feature_by_Text-driven_Manifold_Augmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14611", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Segment_Every_Reference_Object_in_Spatial_and_Temporal_Spaces_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Gradient-Regulated_Meta-Prompt_Learning_for_Generalizable_Vision-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.06571", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Misalign_Contrast_then_Distill_Rethinking_Misalignments_in_Language-Image_Pre-training_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Toward_Multi-Granularity_Decision-Making_Explicit_Visual_Reasoning_with_Hierarchical_Knowledge_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bi_VL-Match_Enhancing_Vision-Language_Pretraining_with_Token-Level_and_Instance-Level_Matching_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Croitoru_Moment_Detection_in_Long_Tutorial_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Not_All_Features_Matter_Enhancing_Few-shot_CLIP_with_Adaptive_Prior_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01195", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Bitton-Guetta_Breaking_Common_Sense_WHOOPS_A_Vision-and-Language_Benchmark_of_Synthetic_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.07274", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Advancing_Referring_Expression_Segmentation_Beyond_Single_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.12452", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_PointCLIP_V2_Prompting_CLIP_and_GPT_for_Powerful_3D_Open-world_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2211.11682", @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/He_Unsupervised_Prompt_Tuning_for_Text-Driven_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Distilling_Coarse-to-Fine_Semantic_Matching_Knowledge_for_Weakly_Supervised_3D_Visual_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09267", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gu_I_Cant_Believe_Theres_No_Images_Learning_Visual_Tasks_Using_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.09778", @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Learning_Cross-Modal_Affinity_for_Referring_Video_Object_Segmentation_Targeting_Limited_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.02041", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ding_MeViS_A_Large-scale_Benchmark_for_Video_Segmentation_with_Motion_Expressions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08544", @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_Diverse_Data_Augmentation_with_Diffusions_for_Effective_Test-time_Prompt_Tuning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06038", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": 
null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tian_ShapeScaffolder_Structure-Aware_3D_Shape_Generation_from_Text_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Udandarao_SuS-X_Training-Free_Name-Only_Transfer_of_Vision-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.16198", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_X-Mesh_Towards_Fast_and_Accurate_Text-driven_3D_Stylization_via_Dynamic_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15764", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_OnlineRefer_A_Simple_Online_Baseline_for_Referring_Video_Object_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.09356", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Attentive_Mask_CLIP_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.08653", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Knowledge_Proxy_Intervention_for_Deconfounded_Video_Question_Answering_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_UniVTG_Towards_Unified_Video-Language_Temporal_Grounding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16715", @@ -760,6 +790,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tu_Self-supervised_Cross-view_Representation_Reconstruction_for_Change_Captioning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Unified_Coarse-to-Fine_Alignment_for_Video-Text_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.10091", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Confidence-aware_Pseudo-label_Learning_for_Weakly_Supervised_Visual_Grounding_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_TextPSG_Panoptic_Scene_Graph_Generation_from_Textual_Descriptions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.07056", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_MAtch_eXpand_and_Improve_Unsupervised_Finetuning_for_Zero-Shot_Action_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08914", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Unify_Align_and_Refine_Multi-Level_Semantic_Alignment_for_Radiology_Report_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15932", @@ -910,6 +946,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Gupta_CLIPTrans_Transferring_Visual_Knowledge_with_Pre-trained_Models_for_Multimodal_Machine_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.15226", @@ -935,6 +972,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Alper_Learning_Human-Human_Interactions_in_Images_from_Weak_Textual_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.14104", @@ -960,6 +998,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Jiang_BUS_Efficient_and_Effective_Vision-Language_Pre-Training_with_Bottom-Up_Patch_Summarization._ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.08504", @@ -985,6 +1024,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_3D-VisTA_Pre-trained_Transformer_for_3D_Vision_and_Text_Alignment_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.04352", @@ -1010,6 +1050,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_ALIP_Adaptive_Language-Image_Pre-Training_with_Synthetic_Caption_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.08428", @@ -1035,6 +1076,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_LoGoPrompt_Synthetic_Text_Images_Can_Be_Good_Visual_Prompts_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01155", @@ -1060,6 +1102,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Kang_Noise-Aware_Learning_from_Web-Crawled_Image-Text_Data_for_Image_Captioning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.13563", @@ -1085,6 +1128,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qian_Decouple_Before_Interact_Multi-Modal_Prompt_Learning_for_Continual_Visual_Question_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1110,6 +1154,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_PromptCap_Prompt-Guided_Image_Captioning_for_VQA_with_GPT-3_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.09699", @@ -1135,6 +1180,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Grounded_Image_Text_Matching_with_Mismatched_Relation_Reasoning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.01236", @@ -1160,6 +1206,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Abdelsalam_GePSAn_Generative_Procedure_Step_Anticipation_in_Cooking_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1185,6 +1232,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Song_LLM-Planner_Few-Shot_Grounded_Planning_for_Embodied_Agents_with_Large_Language_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.04088", @@ -1210,6 +1258,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_VL-PET_Vision-and-Language_Parameter-Efficient_Tuning_via_Granularity_Control_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09804", @@ -1235,6 +1284,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Barraco_With_a_Little_Help_from_Your_Own_Past_Prototypical_Memory_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12383", @@ -1260,6 +1310,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cho_DALL-Eval_Probing_the_Reasoning_Skills_and_Social_Biases_of_Text-to-Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2202.04053", @@ -1285,6 +1336,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hong_Learning_Navigational_Visual_Representations_with_Semantic_Map_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12335", @@ -1310,6 +1362,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_CoTDet_Affordance_Knowledge_Prompting_for_Task_Driven_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01093", @@ -1335,6 +1388,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xi_Open_Set_Video_HOI_detection_from_Action-Centric_Chain-of-Look_Prompting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1360,6 +1414,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Yan_Learning_Concise_and_Descriptive_Attributes_for_Visual_Recognition_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.03685", @@ -1385,6 +1440,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ko_Open-vocabulary_Video_Question_Answering_A_New_Benchmark_for_Evaluating_the_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09363", @@ -1410,6 +1466,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mensink_Encyclopedic_VQA_Visual_Questions_About_Detailed_Properties_of_Fine-Grained_Categories_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.09224", @@ -1435,6 +1492,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ahn_Story_Visualization_by_Online_Text_Augmentation_with_Context_Memory_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07575", @@ -1460,6 +1518,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fei_Transferable_Decoding_with_Visual_Entities_for_Zero-Shot_Image_Captioning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16525", @@ -1485,6 +1544,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Too_Large_Data_Reduction_for_Vision-Language_Pre-Training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.20087", @@ -1510,6 +1570,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_ViLTA_Enhancing_Vision-Language_Pre-training_through_Textual_Augmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16689", @@ -1535,6 +1596,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Baldrati_Zero-Shot_Composed_Image_Retrieval_with_Textual_Inversion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15247", @@ -1560,6 +1622,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Abdelreheem_SATR_Zero-Shot_Semantic_Segmentation_of_3D_Shapes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.04909", @@ -1585,6 +1648,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_CiT_Curation_in_Training_for_Effective_Vision-Language_Data_ICCV_2023_paper.pdf", "paper_arxiv_id": "2301.02241", @@ -1610,6 +1674,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Khattak_Self-regulating_Prompts_Foundational_Model_Adaptation_without_Forgetting_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.06948", @@ -1635,6 +1700,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mavroudi_Learning_to_Ground_Instructional_Articles_in_Videos_through_Narrations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.03802", @@ -1660,6 +1726,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Kurita_RefEgo_Referring_Expression_Comprehension_Dataset_from_First-Person_Perception_of_Ego4D_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12035", @@ -1685,6 +1752,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Multi3DRefer_Grounding_Text_Description_to_Multiple_3D_Objects_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.05251", @@ -1710,6 +1778,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Derakhshani_Bayesian_Prompt_Learning_for_Image-Language_Model_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2210.02390", @@ -1735,6 +1804,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Goel_Who_Are_You_Referring_To_Coreference_Resolution_In_Image_Narrations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.14563", @@ -1760,6 +1830,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kornblith_Guiding_Image_Captioning_Models_Toward_More_Specific_Captions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.16686", @@ -1785,6 +1856,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kil_PreSTU_Pre-Training_for_Scene-Text_Understanding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2209.05534", @@ -1810,6 +1882,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_Exploring_Group_Video_Captioning_with_Efficient_Relational_Approximation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -1835,6 +1908,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Slyman_VLSlice_Interactive_Vision-and-Language_Slice_Discovery_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.06703", @@ -1860,6 +1934,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Patel_Pretrained_Language_Models_as_Visual_Planners_for_Human_Assistance_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.09179", @@ -1885,6 +1960,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_VQA_Therapy_Exploring_Answer_Differences_by_Visually_Grounding_Answers_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11662", @@ -1910,6 +1986,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Towards_High-Fidelity_Text-Guided_3D_Face_Generation_and_Manipulation_Using_only_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.16758", @@ -1935,6 +2012,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Baldrati_Zero-Shot_Composed_Image_Retrieval_with_Textual_Inversion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15247", @@ -1960,6 +2038,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_PatchCT_Aligning_Patch_Set_and_Label_Set_with_Conditional_Transport_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2307.09066", @@ -1985,6 +2064,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Lip_Reading_for_Low-resource_Languages_by_Learning_and_Combining_General_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09311", @@ -2010,6 +2090,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Guo_ViewRefer_Grasp_the_Multi-view_Knowledge_for_3D_Visual_Grounding_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16894", @@ -2035,6 +2116,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_AerialVLN_Vision-and-Language_Navigation_for_UAVs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06735", @@ -2060,6 +2142,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Trager_Linear_Spaces_of_Meanings_Compositional_Structures_in_Vision-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.14383", @@ -2085,6 +2168,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ye_HiTeA_Hierarchical_Temporal-Aware_Video-Language_Pre-training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.14546", @@ -2110,6 +2194,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hazra_EgoTV_Egocentric_Task_Verification_from_Natural_Language_Task_Descriptions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.16975", @@ -2135,6 +2220,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_SINC_Self-Supervised_In-Context_Learning_for_Vision-Language_Tasks_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.07742", @@ -2160,6 +2246,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qiao_VLN-PETL_Parameter-Efficient_Transfer_Learning_for_Vision-and-Language_Navigation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10172", @@ -2185,6 +2272,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_Going_Denser_with_Open-Vocabulary_Part_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2305.11173", @@ -2210,6 +2298,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Tang_Temporal_Collection_and_Distribution_for_Referring_Video_Object_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.03473", @@ -2235,6 +2324,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Inverse_Compositional_Learning_for_Weakly-supervised_Relation_Grounding_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2260,6 +2350,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Why_Is_Prompt_Tuning_for_Vision-Language_Models_Robust_to_Noisy_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11978", @@ -2285,6 +2376,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Han_CHAMPAGNE_Learning_Real-world_Conversation_from_Large-Scale_Web_Videos_ICCV_2023_paper.pdf", 
"paper_arxiv_id": "2303.09713", @@ -2310,6 +2402,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fan_RCA-NOC_Relative_Contrastive_Alignment_for_Novel_Object_Captioning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2335,6 +2428,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sun_DIME-FM__DIstilling_Multimodal_and_Efficient_Foundation_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.18232", @@ -2360,6 +2454,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ouali_Black_Box_Few-Shot_Adaptation_for_Vision-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.01752", @@ -2385,6 +2480,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Shatter_and_Gather_Learning_Referring_Image_Segmentation_with_Text_Supervision_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.15512", @@ -2410,6 +2506,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shen_Accurate_and_Fast_Compressed_Video_Captioning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.12867", @@ -2435,6 +2532,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Exploring_Temporal_Concurrency_for_Video-Language_Representation_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2460,6 +2558,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Momeni_Verbs_in_Action_Improving_Verb_Understanding_in_Video-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06708", @@ -2485,6 +2584,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yao_Sign_Language_Translation_with_Iterative_Prototype_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12191", @@ -2510,6 +2610,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kim_Contrastive_Feature_Masking_Open-Vocabulary_Vision_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.00775", @@ -2535,6 +2636,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Toward_Unsupervised_Realistic_Visual_Question_Answering_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05068", @@ -2560,6 +2662,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_GridMM_Grid_Memory_Map_for_Vision-and-Language_Navigation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12907", @@ -2585,6 +2688,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhuo_Video_Background_Music_Generation_Dataset_Method_and_Evaluation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.11248", @@ -2610,6 +2714,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Deng_Prompt_Switch_Efficient_CLIP_Adaptation_for_Text-Video_Retrieval_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.07648", @@ -2635,6 
+2740,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhu_Prompt-aligned_Gradient_for_Prompt_Tuning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2205.14865", @@ -2660,6 +2766,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kan_Knowledge-Aware_Prompt_Tuning_for_Generalizable_Vision-Language_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11186", @@ -2685,6 +2792,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ma_Order-Prompted_Tag_Sequence_Generation_for_Video_Tagging_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2710,6 +2818,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Pratt_What_Does_a_Platypus_Look_Like_Generating_Customized_Prompts_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2209.03320", @@ -2735,6 +2844,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cho_PromptStyler_Prompt-driven_Style_Generation_for_Source-free_Domain_Generalization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15199", @@ -2760,6 +2870,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_DiffDis_Empowering_Generative_Diffusion_Model_with_Cross-Modal_Discrimination_Capability_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.09306", @@ -2785,6 +2896,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Shi_EdaDet_Open-Vocabulary_Object_Detection_Using_Early_Dense_Alignment_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01151", @@ -2810,6 +2922,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cheng_MixSpeech_Cross-Modality_Self-Learning_with_Audio-Visual_Stream_Mixup_for_Visual_Speech_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05309", @@ -2835,6 +2948,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Roth_Waffling_Around_for_Performance_Visual_Classification_with_Random_Words_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.07282", @@ -2860,6 +2974,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Qiao_March_in_Chat_Interactive_Prompting_for_Remote_Embodied_Referring_Expression_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10141", @@ -2885,6 +3000,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yu_Chinese_Text_Recognition_with_A_Pre-Trained_CLIP-Like_Model_Through_Image-IDS_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.01083", @@ -2910,6 +3026,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Schulter_OmniLabel_A_Challenging_Benchmark_for_Language-Based_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.11463", @@ -2935,6 +3052,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Li_IntentQA_Context-aware_Video_Intent_Reasoning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -2960,6 +3078,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhai_Sigmoid_Loss_for_Language_Image_Pre-Training_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.15343", @@ -2985,6 +3104,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Shtedritski_What_does_CLIP_know_about_a_red_circle_Visual_prompt_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.06712", @@ -3010,6 +3130,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Equivariant_Similarity_for_Vision-Language_Foundation_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.14465", @@ -3035,6 +3156,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Scaling_Data_Generation_in_Vision-and-Language_Navigation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.15644", @@ -3060,6 +3182,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Su_Name_Your_Colour_For_the_Task_Artificially_Discover_Colour_Naming_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.03434", @@ -3085,6 +3208,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_G2L_Semantically_Aligned_and_Uniform_Video_Grounding_via_Geodesic_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14277", @@ -3110,6 +3234,7 @@ "modelscope": null, "gitee": 
null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cui_Grounded_Entity-Landmark_Adaptive_Pre-Training_for_Vision-and-Language_Navigation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12587", @@ -3135,6 +3260,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Ibrahimi_Audio-Enhanced_Text-to-Video_Retrieval_using_Text-Conditioned_Feature_Alignment_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12964", @@ -3160,6 +3286,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hu_Open-domain_Visual_Entity_Recognition_Towards_Recognizing_Millions_of_Wikipedia_Entities_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.11154", diff --git a/json_data/vision-and-robotics.json b/json_data/vision-and-robotics.json index 92fa3d7..9cefea1 100644 --- a/json_data/vision-and-robotics.json +++ b/json_data/vision-and-robotics.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Simoun_Synergizing_Interactive_Motion-appearance_Understanding_for_Vision-based_Reinforcement_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Among_Us_Adversarially_Robust_Collaborative_Perception_by_Consensus_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.09495", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Saltori_Walking_Your_LiDOG_A_Journey_Through_Multiple_Domains_for_LiDAR_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.11705", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhai_Stabilizing_Visual_Reinforcement_Learning_via_Asymmetric_Interactive_Cooperation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Liang_MAAL_Multimodality-Aware_Autoencoder-Based_Affordance_Learning_for_3D_Articulated_Objects_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Kong_Rethinking_Range_View_Representation_for_LiDAR_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05367", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_PourIt_Weakly-Supervised_Liquid_Perception_from_a_Single_Image_for_Visual_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11299", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Moreau_CROSSFIRE_Camera_Relocalization_On_Self-Supervised_Features_from_an_Implicit_Representation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.04869", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Choi_Environment_Agnostic_Representation_for_Visual_Reinforcement_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cui_Test-time_Personalizable_Forecasting_of_3D_Human_Poses_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xiang_HM-ViT_Hetero-Modal_Vehicle-to-Vehicle_Cooperative_Perception_with_Vision_Transformer_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.10628", diff --git a/json_data/vision-applications-and-systems.json b/json_data/vision-applications-and-systems.json index 156929d..f3abbc4 100644 --- a/json_data/vision-applications-and-systems.json +++ b/json_data/vision-applications-and-systems.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chowdhury_Democratising_2D_Sketch_to_3D_Shape_Retrieval_Through_Pivoting_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Feng_Towards_Instance-adaptive_Inference_for_Federated_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.06051", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_TransTIC_Transferring_Transformer-based_Image_Compression_from_Human_Perception_to_Machine_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.05085", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": 
null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_Counting_Crowds_in_Bad_Weather_ICCV_2023_paper.pdf", "paper_arxiv_id": "2306.01209", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Xu_NeRF-Det_Learning_Geometry-Aware_Volumetric_Representation_for_Multi-View_3D_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.14620", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Sadoughi_MEGA_Multimodal_Alignment_Aggregation_and_Distillation_For_Cinematic_Video_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.11185", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Bring_Clipart_to_Life_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Hwang_UpCycling_Semi-supervised_3D_Object_Detection_without_Sharing_Raw-level_Unlabeled_Scenes_ICCV_2023_paper.pdf", "paper_arxiv_id": "2211.11950", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_Graph_Matching_with_Bi-level_Noisy_Correspondence_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.04085", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Shin_Anomaly_Detection_using_Score-based_Perturbation_Resilience_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_Spatio-Temporal_Domain_Awareness_for_Multi-Agent_Collaborative_Perception_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13929", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Baldrati_Multimodal_Garment_Designer_Human-Centric_Latent_Diffusion_Models_for_Fashion_Image_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.02051", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Towards_Unifying_Medical_Vision-and-Language_Pre-Training_via_Soft_Prompts_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.08958", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhuang_MAS_Towards_Resource-Efficient_Federated_Multiple-Task_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.11285", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_Hierarchical_Visual_Categories_Modeling_A_Joint_Representation_Learning_and_Density_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liu_Improving_Generalization_in_Visual_Reinforcement_Learning_via_Conflict-aware_Gradient_Agreement_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.01194", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Tiny_Updater_Towards_Efficient_Neural_Network-Driven_Software_Updating_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Multiple_Planar_Object_Tracking_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_OmnimatteRF_Robust_Omnimatte_with_3D_Background_Modeling_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.07749", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wen_Ordinal_Label_Distribution_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_Re-mine_Learn_and_Reason_Exploring_the_Cross-modal_Semantic_Correlations_for_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.13529", @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Li_MUVA_A_New_Large-Scale_Benchmark_for_Multi-View_Amodal_Instance_Segmentation_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -560,6 +582,7 @@ "modelscope": null, 
"gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Chen_Editable_Image_Geometric_Abstraction_via_Neural_Primitive_Assembly_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Drehwald_One-Shot_Recognition_of_Any_Material_Anywhere_Using_Contrastive_Learning_with_ICCV_2023_paper.pdf", "paper_arxiv_id": "2212.00648", @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhao_Fast_Full-frame_Video_Stabilization_with_Iterative_Optimization_ICCV_2023_paper.pdf", "paper_arxiv_id": "2307.12774", @@ -635,6 +660,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Gu_Two_Birds_One_Stone_A_Unified_Framework_for_Joint_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.11335", @@ -660,6 +686,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Cao_Multi-Modal_Gated_Mixture_of_Local-to-Global_Experts_for_Dynamic_Image_Fusion_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.01392", @@ -685,6 +712,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wilson_SAFE_Sensitivity-Aware_Features_for_Out-of-Distribution_Object_Detection_ICCV_2023_paper.pdf", "paper_arxiv_id": "2208.13930", @@ -710,6 +738,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_GeT_Generative_Target_Structure_Debiasing_for_Domain_Adaptation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10205", @@ -735,6 +764,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_HairCLIPv2_Unifying_Hair_Editing_via_Proxy_Feature_Blending_ICCV_2023_paper.pdf", "paper_arxiv_id": "2310.10651", @@ -760,6 +790,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Fu_Deformer_Dynamic_Fusion_Transformer_for_Robust_Hand_Pose_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.04991", @@ -785,6 +816,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wei_Improving_Continuous_Sign_Language_Recognition_with_Cross-Lingual_Signs_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.10809", @@ -810,6 +842,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Lin_A_Parse-Then-Place_Approach_for_Generating_Graphic_Layouts_from_Textual_Descriptions_ICCV_2023_paper.pdf", "paper_arxiv_id": "2308.12700", @@ -835,6 +868,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Klinghoffer_DISeR_Designing_Imaging_Systems_with_Reinforcement_Learning_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.13851", @@ -860,6 +894,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023/papers/Liao_Segmentation_of_Tubular_Structures_Using_Iterative_Training_with_Tailored_Samples_ICCV_2023_paper.pdf", "paper_arxiv_id": "2309.08727", @@ -885,6 +920,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Nunes_Time-to-Contact_Map_by_Joint_Estimation_of_Up-to-Scale_Inverse_Depth_and_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/vision-graphics-and-robotics.json b/json_data/vision-graphics-and-robotics.json index 047bd40..57b93d8 100644 --- a/json_data/vision-graphics-and-robotics.json +++ b/json_data/vision-graphics-and-robotics.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhang_Adding_Conditional_Control_to_Text-to-Image_Diffusion_Models_ICCV_2023_paper.pdf", "paper_arxiv_id": "2302.05543", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wu_Factorized_Inverse_Path_Tracing_for_Efficient_and_Accurate_Material-Lighting_Estimation_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.05669", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wang_Manipulate_by_Seeing_Creating_Manipulation_Controllers_from_Pre-Trained_Representations_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.08135", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Zhong_3D_Implicit_Transporter_for_Temporally_Consistent_Keypoint_Discovery_ICCV_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ 
"modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Mankovich_Chordal_Averaging_on_Flag_Manifolds_and_Its_Applications_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.13501", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Wan_UniDexGrasp_Improving_Dexterous_Grasping_Policy_Learning_via_Geometry-Aware_Curriculum_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2304.00464", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Huang_GameFormer_Game-theoretic_Modeling_and_Learning_of_Transformer-based_Interactive_Prediction_and_ICCV_2023_paper.pdf", "paper_arxiv_id": "2303.05760", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023/papers/Yang_PPR_Physically_Plausible_Reconstruction_from_Monocular_Videos_ICCV_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/visual-inductive-priors-for-data-efficient-dl-w.json b/json_data/visual-inductive-priors-for-data-efficient-dl-w.json index c093253..b3676a3 100644 --- a/json_data/visual-inductive-priors-for-data-efficient-dl-w.json +++ b/json_data/visual-inductive-priors-for-data-efficient-dl-w.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Edixhoven_Using_and_Abusing_Equivariance_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.11316", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Strafforello_Video_BagNet_Short_Temporal_Receptive_Fields_Increase_Robustness_in_Long-Term_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.11249", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Daroya_COSE_A_Consistency-Sensitivity_Metric_for_Saliency_on_Image_Classification_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.10989", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Wang_DFM-X_Augmentation_by_Leveraging_Prior_Knowledge_of_Shortcut_Learning_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.06622", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Estepa_Good_Fences_Make_Good_Neighbours_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Shyam_Data_Efficient_Single_Image_Dehazing_via_Adversarial_Auto-Augmentation_and_Extended_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Radwan_Distilling_Part-Whole_Hierarchical_Knowledge_from_a_Huge_Pretrained_Class_Agnostic_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Garcia-Gasulla_Padding_Aware_Neurons_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.08048", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Ganatra_Logarithm-Transform_Aided_Gaussian_Sampling_for_Few-Shot_Learning_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.16337", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Mazumder_DeepVAT_A_Self-Supervised_Technique_for_Cluster_Assessment_in_Image_Datasets_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2306.00011", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Brigato_No_Data_Augmentation_Alternative_Regularizations_for_Effective_Training_on_Small_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.01694", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Nicodemou_RV-VAE_Integrating_Random_Variable_Algebra_into_Variational_Autoencoders_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Saha_PARTICLE_Part_Discovery_and_Contrastive_Learning_for_Fine-Grained_Recognition_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.13822", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Silva_Self-Supervised_Learning_of_Contextualized_Local_Visual_Embeddings_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2310.00527", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Thopalli_InterAug_A_Tuning-Free_Augmentation_Policy_for_Data-Efficient_and_Robust_Object_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Cosma_Geometric_Superpixel_Representations_for_Efficient_Image_Classification_with_Graph_Neural_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/VIPriors/papers/Koishekenov_Geometric_Contrastive_Learning_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/w-and-challenge-on-deepfake-analysis-and-detection.json b/json_data/w-and-challenge-on-deepfake-analysis-and-detection.json index d5db52a..8bba37d 100644 --- a/json_data/w-and-challenge-on-deepfake-analysis-and-detection.json +++ b/json_data/w-and-challenge-on-deepfake-analysis-and-detection.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Lorenz_Detecting_Images_Generated_by_Deep_Diffusion_Models_Using_Their_Local_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Balaji_Attending_Generalizability_in_Course_of_Deep_Fake_Detection_by_Exploring_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.13503", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Nandi_TrainFors_A_Large_Benchmark_Training_Dataset_for_Image_Manipulation_Detection_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.05264", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Rosberg_FIVA_Facial_Image_and_Video_Anonymization_and_Anonymization_Defense_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.04228", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Aghasanli_Interpretable-Through-Prototypes_Deepfake_Detection_for_Diffusion_Models_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Das_Learning_Interpretable_Forensic_Representations_via_Local_Window_Modulation_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Kamat_Revisiting_Generalizability_in_Deepfake_Detection_Improving_Metrics_and_Stabilizing_Transfer_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Beuve_WaterLo_Protect_Images_from_Deepfakes_Using_Localized_Semi-Fragile_Watermark_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Epstein_Online_Detection_of_AI-Generated_Images__ICCVW_2023_paper.pdf", "paper_arxiv_id": "2310.15150", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Husseini_A_Comprehensive_Framework_for_Evaluating_Deepfake_Generators_Dataset_Metrics_Performance_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Saha_Undercover_Deepfakes_Detecting_Fake_Segments_in_Videos_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2305.06564", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/DFAD/papers/Hamadene_Deepfakes_Signatures_Detection_in_the_Handcrafted_Features_Space_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/w-on-cv-in-plant-phenotyping-and-agriculture.json b/json_data/w-on-cv-in-plant-phenotyping-and-agriculture.json index d2540e2..e102b41 100644 --- a/json_data/w-on-cv-in-plant-phenotyping-and-agriculture.json +++ b/json_data/w-on-cv-in-plant-phenotyping-and-agriculture.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Penzel_Analyzing_the_Behavior_of_Cauliflower_Harvest-Readiness_Models_by_Investigating_Feature_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Schauer_Towards_Automated_Regulation_of_Jacobaea_Vulgaris_in_Grassland_Using_Deep_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Casado-Garcia_Estimation_of_Crop_Production_by_Fusing_Images_and_Crop_Features_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Lejeune_An_Interpretable_Framework_to_Characterize_Compound_Treatments_on_Filamentous_Fungi_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Rustia_Rapid_Tomato_DUS_Trait_Analysis_Using_an_Optimized_Mobile-Based_Coarse-to-Fine_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Jol_Non-Destructive_Infield_Quality_Estimation_of_Strawberries_Using_Deep_Architectures_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Villalpando_Reinforcement_Learning_with_Space_Carving_for_Plant_Scanning_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Cherepashkin_Deep_Learning_Based_3d_Reconstruction_for_Phenotyping_of_Wheat_Seeds_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Celikkan_Semantic_Segmentation_of_Crops_andWeeds_with_Probabilistic_Modeling_and_Uncertainty_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": "https://huggingface.co/datasets/deep-plants/AGM_HS", "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Sama_A_new_Large_Dataset_and_a_Transfer_Learning_Methodology_for_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Tausch_Pollinators_as_Data_Collectors_Estimating_Floral_Diversity_with_Bees_and_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Farag_Inductive_Conformal_Prediction_for_Harvest-Readiness_Classification_of_Cauliflower_Plants_A_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Melki_Group-Conditional_Conformal_Prediction_via_Quantile_Regression_Calibration_for_Crop_and_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.15094", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Wagner_Vision-Based_Monitoring_of_the_Short-Term_Dynamic_Behaviour_of_Plants_for_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Engstrom_Improving_Deep_Learning_on_Hyperspectral_Images_of_Grain_by_Incorporating_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Chen_Adapting_Vision_Foundation_Models_for_Plant_Phenotyping_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Amine_Embedded_Plant_Recognition_A_Benchmark_for_low_Footprint_Deep_Neural_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Tempelaere_Deep_Learning_for_Apple_Fruit_Quality_Inspection_Using_X-Ray_Imaging_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Najafian_Detection_of_Fusarium_Damaged_Kernels_in_Wheat_Using_Deep_Semi-Supervised_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Korschens_Unified_Automatic_Plant_Cover_and_Phenology_Prediction_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -510,6 +530,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Wang_Weed_Mapping_with_Convolutional_Neural_Networks_on_High_Resolution_Whole-Field_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -535,6 +556,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Hartley_Unlocking_Comparative_Plant_Scoring_with_Siamese_Neural_Networks_and_Pairwise_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -560,6 +582,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Page-Fortin_Class-Incremental_Learning_of_Plant_and_Disease_Detection_Growing_Branches_with_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2304.06619", @@ -585,6 +608,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Gentilhomme_Efficient_Grapevine_Structure_Estimation_in_Vineyards_Conditions_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -610,6 +634,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/CVPPA/papers/Song_Plant_Root_Occlusion_Inpainting_with_Generative_Adversarial_Network_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/w-on-new-ideas-in-vision-transformers.json b/json_data/w-on-new-ideas-in-vision-transformers.json index 391ad33..ddb5ee1 100644 --- a/json_data/w-on-new-ideas-in-vision-transformers.json +++ b/json_data/w-on-new-ideas-in-vision-transformers.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Englebert_Explaining_Through_Transformer_Input_Sampling_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Mondal_Actor-Agnostic_Multi-Label_Action_Recognition_with_Multi-Modal_Query_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2307.10763", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Sun_All-pairs_Consistency_Learning_forWeakly_Supervised_Semantic_Segmentation_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Wang_Dual-Contrastive_Dual-Consistency_Dual-Transformer_A_Semi-Supervised_Approach_to_Medical_Image_Segmentation_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Djenouri_A_Hybrid_Visual_Transformer_for_Efficient_Deep_Human_Activity_Recognition_ICCVW_2023_paper.pdf", 
"paper_arxiv_id": null, @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Haurum_Which_Tokens_to_Use_Investigating_Token_Reduction_in_Vision_Transformers_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.04657", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Yoo_Hierarchical_Spatiotemporal_Transformers_for_Video_Object_Segmentation_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2307.08263", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Das_IDTransformer_Transformer_for_Intrinsic_Image_Decomposition_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Havtorn_MSViT_Dynamic_Mixed-Scale_Tokenization_for_Vision_Transformers_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2307.02321", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Hertlein_Template-Guided_Illumination_Correction_for_Document_Images_with_Imperfect_Geometric_Reconstruction_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Diba_Spatio-Temporal_Convolution-Attention_Video_Network_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": 
null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Sekhar_TSOSVNet_Teacher-Student_Collaborative_Knowledge_Distillation_for_Online_Signature_Verification_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Jain_SeMask_Semantically_Masked_Transformers_for_Semantic_Segmentation_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2112.12782", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Shamsolmoali_TransInpaint_Transformer-Based_Image_Inpainting_with_Context_Adaptation_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Li_Interactive_Image_Segmentation_with_Cross-Modality_Vision_Transformers_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2307.02280", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Ganugula_MOSAIC_Multi-Object_Segmented_Arbitrary_Stylization_Using_CLIP_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.13716", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Homeyer_On_Moving_Object_Segmentation_from_Monocular_Video_with_Transformers_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/NIVT/papers/Wang_SCSC_Spatial_Cross-Scale_Convolution_Module_to_Strengthen_Both_CNNs_and_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.07110", diff --git a/json_data/w-representation-learning-with-very-limited-images.json b/json_data/w-representation-learning-with-very-limited-images.json index c87e253..595c633 100644 --- a/json_data/w-representation-learning-with-very-limited-images.json +++ b/json_data/w-representation-learning-with-very-limited-images.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Lim_Image_Guided_Inpainting_with_Parameter_Efficient_Learning_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Park_Augmenting_Features_via_Contrastive_Learning-Based_Generative_Model_for_Long-Tailed_Classification_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Kender_G2L_A_High-Dimensional_Geometric_Approach_for_Automatic_Generation_of_Highly_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Marcu_Self-Supervised_Hypergraphs_for_Learning_Multiple_World_Interpretations_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.07615", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Kwarciak_Deep_Generative_Networks_for_Heterogeneous_Augmentation_of_Cranial_Defects_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.04883", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Reichardt_360deg_from_a_Single_Camera_A_Few-Shot_Approach_for_LiDAR_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Vandeghen_Adaptive_Self-Training_for_Object_Detection_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2212.05911", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Psaltis_FedLID_Self-Supervised_Federated_Learning_for_Leveraging_Limited_Image_Data_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Sosa_A_Horse_with_no_Labels_Self-Supervised_Horse_Pose_Estimation_from_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.03411", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Nguyen_Boosting_Semi-Supervised_Learning_by_Bridging_high_and_low-Confidence_Predictions_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.07509", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Dawoud_SelectNAdapt_Support_Set_Selection_for_Few-Shot_Domain_Adaptation_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.04946", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Bao_MIAD_A_Maintenance_Inspection_Dataset_for_Unsupervised_Anomaly_Detection_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2211.13968", @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Hong_Enhancing_Classification_Accuracy_on_Limited_Data_via_Unconditional_GAN_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Le_Self-Training_and_Multi-Task_Learning_for_Limited_Data_Evaluation_Study_on_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.06288", @@ -360,6 +374,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Bicsi_JEDI_Joint_Expert_Distillation_in_a_Semi-Supervised_Multi-Dataset_Student-Teacher_Scenario_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.04934", @@ -385,6 +400,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Li_Semantic_RGB-D_Image_Synthesis_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.11356", @@ -410,6 +426,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Shtedritski_Learning_Universal_Semantic_Correspondences_with_No_Supervision_and_Automatic_Data_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -435,6 +452,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Takenaka_Guiding_Video_Prediction_with_Explicit_Procedural_Knowledge_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -460,6 +478,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Lin_Frequency-Aware_Self-Supervised_Long-Tailed_Learning_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.04723", @@ -485,6 +504,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/LIMIT/papers/Sharma_Tensor_Factorization_for_Leveraging_Cross-Modal_Knowledge_in_Data-Constrained_Infrared_Object_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.16592", diff --git a/json_data/w-scene-graphs-and-graph-representation-learning.json b/json_data/w-scene-graphs-and-graph-representation-learning.json index 93e127a..f83688d 100644 --- a/json_data/w-scene-graphs-and-graph-representation-learning.json +++ b/json_data/w-scene-graphs-and-graph-representation-learning.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/SG2RL/papers/Aflalo_DeepCut_Unsupervised_Segmentation_Using_Graph_Neural_Networks_Clustering_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2212.05853", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/SG2RL/papers/Ulger_Relational_Prior_Knowledge_Graphs_for_Detection_and_Instance_Segmentation_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2310.07573", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/SG2RL/papers/Neau_Fine-Grained_is_Too_Coarse_A_Novel_Data-Centric_Approach_for_Efficient_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2305.18668", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/SG2RL/papers/Gillsjo_Polygon_Detection_for_Room_Layout_Estimation_using_Heterogeneous_Graphs_andWireframes_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2306.12203", @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/SG2RL/papers/Farshad_SceneGenie_Scene_Graph_Guided_Diffusion_Models_for_Image_Synthesis_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2304.14573", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/SG2RL/papers/Thauvin_Knowledge_Informed_Sequential_Scene_Graph_Verification_Using_VQA_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/SG2RL/papers/Holm_Dynamic_Scene_Graph_Representation_for_Surgical_Video_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.14538", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/SG2RL/papers/Lorenz_Haystack_A_Panoptic_Scene_Graph_Dataset_to_Evaluate_Rare_Predicate_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.02286", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/SG2RL/papers/Mlodzian_nuScenes_Knowledge_Graph_-_A_Comprehensive_Semantic_Representation_of_Traffic_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2312.09676", @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/SG2RL/papers/Sun_Exploring_the_Road_Graph_in_Trajectory_Forecasting_for_Autonomous_Driving_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/w-to-nerf-or-not-to-nerf.json b/json_data/w-to-nerf-or-not-to-nerf.json index 68f85aa..14f7562 100644 --- a/json_data/w-to-nerf-or-not-to-nerf.json +++ b/json_data/w-to-nerf-or-not-to-nerf.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/RHWC/papers/Zheng_ILSH_The_Imperial_Light-Stage_Head_Dataset_for_Human_Head_View_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2310.03952", @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/RHWC/papers/Jang_VSCHH_2023_A_Benchmark_for_the_View_Synthesis_Challenge_of_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, diff --git a/json_data/w-uncertainty-estimation-for-cv.json b/json_data/w-uncertainty-estimation-for-cv.json index 7faa29b..16a25de 100644 --- a/json_data/w-uncertainty-estimation-for-cv.json +++ b/json_data/w-uncertainty-estimation-for-cv.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, 
"paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Zelenka_A_Simple_and_Explainable_Method_for_Uncertainty_Estimation_Using_Attribute_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Rabbani_Unsupervised_Confidence_Approximation_Trustworthy_Learning_from_Noisy_Labelled_Data_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Ledda_Adversarial_Attacks_Against_Uncertainty_Quantification_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2309.10586", @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Alcover-Couso_Biased_Class_disagreement_detection_of_out_of_distribution_instances_by_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Vojir_Calibrated_Out-of-Distribution_Detection_with_a_Generic_Representation_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2303.13148", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Ali_DELO_Deep_Evidential_LiDAR_Odometry_Using_Partial_Optimal_Transport_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.07153", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Roschewitz_Distance_Matters_For_Improving_Performance_Estimation_Under_Covariate_Shift_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.07223", @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Yao_Dual-Level_Interaction_for_Domain_Adaptive_Semantic_Segmentation_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2307.07972", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Narayanaswamy_Exploring_Inlier_and_Outlier_Specification_for_Improved_Medical_OOD_Detection_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -235,6 +244,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Galesso_Far_Away_in_the_Deep_Space_Dense_Nearest-Neighbor-Based_Out-of-Distribution_Detection_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2211.06660", @@ -260,6 +270,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Venkataramanan_Gaussian_Latent_Representations_for_Uncertainty_Estimation_Using_Mahalanobis_Distance_in_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2305.13849", @@ -285,6 +296,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Hammam_Identifying_Out-of-Domain_Objects_with_Dirichlet_Deep_Neural_Networks_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -310,6 +322,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Baumann_Probabilistic_MIMO_U-Net_Efficient_and_Accurate_Uncertainty_Estimation_for_Pixel-Wise_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.07477", @@ -335,6 +348,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/UnCV/papers/Sandstrom_UncLe-SLAM_Uncertainty_Learning_for_Dense_Neural_SLAM_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2306.11048", diff --git a/json_data/w-what-is-next-in-multimodal-foundation-models.json b/json_data/w-what-is-next-in-multimodal-foundation-models.json index 23104e1..617bece 100644 --- a/json_data/w-what-is-next-in-multimodal-foundation-models.json +++ b/json_data/w-what-is-next-in-multimodal-foundation-models.json @@ -10,6 +10,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/MMFM/papers/Nuthalapati_Coarse_to_Fine_Frame_Selection_for_Online_Open-Ended_Video_Question_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -35,6 +36,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/MMFM/papers/Pan_Retrieving-to-Answer_Zero-Shot_Video_Question_Answering_with_Frozen_Large_Language_Models_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2306.11732", @@ -60,6 +62,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/MMFM/papers/Zonneveld_Video-and-Language_VidL_models_and_their_cognitive_relevance_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -85,6 +88,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": 
"https://openaccess.thecvf.com/content/ICCV2023W/MMFM/papers/Wang_Video_Attribute_Prototype_Network_A_New_Perspective_for_Zero-Shot_Video_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -110,6 +114,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/MMFM/papers/Huang_Interaction-Aware_Prompting_for_Zero-Shot_Spatio-Temporal_Action_Detection_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2304.04688", @@ -135,6 +140,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/MMFM/papers/Zhong_ClipCrop_Conditioned_Cropping_Driven_by_Vision-Language_Model_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2211.11492", @@ -160,6 +166,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/MMFM/papers/Salin_Towards_an_Exhaustive_Evaluation_of_Vision-Language_Foundation_Models_ICCVW_2023_paper.pdf", "paper_arxiv_id": null, @@ -185,6 +192,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/MMFM/papers/Maniparambil_Enhancing_CLIP_with_GPT-4_Harnessing_Visual_Descriptions_as_Prompts_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2307.11661", @@ -210,6 +218,7 @@ "modelscope": null, "gitee": null, "zenodo": null, + "kaggle": null, "demo_page": null, "paper_thecvf": "https://openaccess.thecvf.com/content/ICCV2023W/MMFM/papers/Pourreza_Painter_Teaching_Auto-Regressive_Language_Models_to_Draw_Sketches_ICCVW_2023_paper.pdf", "paper_arxiv_id": "2308.08520",