diff --git a/V3Det.png b/V3Det.png new file mode 100644 index 000000000..e97d5f4bc Binary files /dev/null and b/V3Det.png differ diff --git a/challenge_config.yaml b/challenge_config.yaml index ede451aed..b55bda07d 100755 --- a/challenge_config.yaml +++ b/challenge_config.yaml @@ -1,18 +1,18 @@ # If you are not sure what all these fields mean, please refer our documentation here: # https://evalai.readthedocs.io/en/latest/configuration.html -title: Random Number Generator Challenge -short_description: Random number generation challenge for each submission +title: V3Det Challenge 2024 - Vast Vocabulary Visual Detection +short_description: Join the V3Det Challenge 2024 - Vast Vocabulary Visual Detection, and push the boundaries of object detection! Explore the rich diversity and endless possibilities of the V3Det dataset. description: templates/description.html evaluation_details: templates/evaluation_details.html terms_and_conditions: templates/terms_and_conditions.html -image: logo.jpg +image: V3Det.png submission_guidelines: templates/submission_guidelines.html -leaderboard_description: Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras egestas a libero nec sagittis. +leaderboard_description: Explore the frontiers of object detection in the V3Det Challenge 2024. Witness innovation and precision as global contenders navigate through a vast vocabulary of visual categories. evaluation_script: evaluation_script.zip -remote_evaluation: False +remote_evaluation: True is_docker_based: False -start_date: 2019-01-01 00:00:00 -end_date: 2099-05-31 23:59:59 +start_date: 2024-03-30 00:00:00 +end_date: 2024-05-31 23:59:59 published: True leaderboard: @@ -32,21 +32,37 @@ leaderboard: } } } + - id: 2 + schema: + { + "labels": ["Metric1", "Metric2", "Metric3", "Total"], + "default_order_by": "Total", + "metadata": { + "Metric1": { + "sort_ascending": True, + "description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.", + }, + "Metric2": { + "sort_ascending": True, + "description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.", + } + } + } challenge_phases: - id: 1 - name: Dev Phase + name: Development Phase description: templates/challenge_phase_1_description.html - leaderboard_public: False + leaderboard_public: True is_public: True is_submission_public: True - start_date: 2019-01-19 00:00:00 - end_date: 2099-04-25 23:59:59 + start_date: 2024-03-19 00:00:00 + end_date: 2024-05-31 23:59:59 test_annotation_file: annotations/test_annotations_devsplit.json codename: dev - max_submissions_per_day: 5 - max_submissions_per_month: 50 - max_submissions: 50 + max_submissions_per_day: 50 + max_submissions_per_month: 5000 + max_submissions: 50000 default_submission_meta_attributes: - name: method_name is_visible: True @@ -77,18 +93,18 @@ challenge_phases: is_partial_submission_evaluation_enabled: False allowed_submission_file_types: ".json, .zip, .txt, .tsv, .gz, .csv, .h5, .npy, .npz" - id: 2 - name: Test Phase + name: Technical Report Submission Phase description: templates/challenge_phase_2_description.html leaderboard_public: True is_public: True is_submission_public: True - start_date: 2019-01-01 00:00:00 - end_date: 2099-05-24 23:59:59 + start_date: 2024-06-01 00:00:00 + end_date: 2024-06-07 23:59:59 test_annotation_file: annotations/test_annotations_testsplit.json codename: test - max_submissions_per_day: 5 - max_submissions_per_month: 50 - max_submissions: 50 + max_submissions_per_day: 50 + max_submissions_per_month: 5000 + max_submissions: 50000 
default_submission_meta_attributes: - name: method_name is_visible: True @@ -115,31 +131,25 @@ challenge_phases: type: boolean is_restricted_to_select_one_submission: False is_partial_submission_evaluation_enabled: False - + dataset_splits: - id: 1 - name: Train Split - codename: train_split + name: OVD + codename: OVD - id: 2 - name: Test Split - codename: test_split + name: Supervised + codename: Supervised challenge_phase_splits: - challenge_phase_id: 1 - leaderboard_id: 1 - dataset_split_id: 1 - visibility: 1 - leaderboard_decimal_precision: 2 - is_leaderboard_order_descending: True - - challenge_phase_id: 2 leaderboard_id: 1 dataset_split_id: 1 visibility: 3 leaderboard_decimal_precision: 2 is_leaderboard_order_descending: True - - challenge_phase_id: 2 - leaderboard_id: 1 + - challenge_phase_id: 1 + leaderboard_id: 2 dataset_split_id: 2 - visibility: 1 + visibility: 3 leaderboard_decimal_precision: 2 is_leaderboard_order_descending: True diff --git a/evaluation_script/__init__.py b/evaluation_script/__init__.py index 3d9cd9480..b0c124845 100644 --- a/evaluation_script/__init__.py +++ b/evaluation_script/__init__.py @@ -1,4 +1,4 @@ -""" + # Q. How to install custom python pip packages? # A. Uncomment the below code to install the custom python packages. @@ -33,11 +33,9 @@ def install_local_package(folder_name): ] ) -install("shapely==1.7.1") -install("requests==2.25.1") - -install_local_package("package_folder_name") - -""" +install("numpy") +install("mmengine") +install("pycocotools") +install("tqdm") -from .main import evaluate +# install_local_package("package_folder_name") diff --git a/evaluation_script/cocoeval_mp.py b/evaluation_script/cocoeval_mp.py new file mode 100644 index 000000000..0450dda56 --- /dev/null +++ b/evaluation_script/cocoeval_mp.py @@ -0,0 +1,355 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import itertools +import multiprocessing as mp +import time +from collections import defaultdict + +import mmengine +import numpy as np +from mmengine.logging import MMLogger +from pycocotools.cocoeval import COCOeval, Params +from tqdm import tqdm + + +class COCOevalMP(COCOeval): + + def __init__(self, cocoGt=None, cocoDt=None, iouType='bbox', num_proc=8, + tree_ann_path='data/V3Det/annotations/v3det_2023_v1_category_tree.json'): + ''' + Initialize CocoEval using coco APIs for gt and dt + :param cocoGt: coco object with ground truth annotations + :param cocoDt: coco object with detection results + :return: None + ''' + if not iouType: + print('iouType not specified. 
use default iouType segm') + self.cocoGt = cocoGt # ground truth COCO API + self.cocoDt = cocoDt # detections COCO API + self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements + self.eval = {} # accumulated evaluation results + self._gts = defaultdict(list) # gt for evaluation + self._dts = defaultdict(list) # dt for evaluation + self.params = Params(iouType=iouType) # parameters + self._paramsEval = {} # parameters for evaluation + self.stats = [] # result summarization + self.ious = {} # ious between all gts and dts + self.num_proc = num_proc # num of process + self.tree_ann_path = tree_ann_path + if not mmengine.exists(tree_ann_path): + print(f'{tree_ann_path} not exist') + raise FileNotFoundError + if not cocoGt is None: + self.params.imgIds = sorted(cocoGt.getImgIds()) + self.params.catIds = sorted(cocoGt.getCatIds()) + + + def _prepare(self): + ''' + Prepare ._gts and ._dts for evaluation based on params + :return: None + ''' + + def _toMask(anns, coco): + # modify ann['segmentation'] by reference + for ann in anns: + rle = coco.annToRLE(ann) + ann['segmentation'] = rle + + # for each category, maintain its child categories + cat_tree = mmengine.load(self.tree_ann_path) + catid2treeid = cat_tree['categoryid2treeid'] + treeid2catid = {v: k for k, v in catid2treeid.items()} + ori_ancestor2descendant = cat_tree['ancestor2descendant'] + ancestor2descendant = dict() + for k, v in ori_ancestor2descendant.items(): + if k in treeid2catid: + ancestor2descendant[k] = v + ancestor2descendant_catid = defaultdict(set) + for tree_id in ancestor2descendant: + cat_id = treeid2catid[tree_id] + descendant_ids = ancestor2descendant[tree_id] + for descendant_id in descendant_ids: + if descendant_id not in treeid2catid: + continue + descendant_catid = treeid2catid[descendant_id] + ancestor2descendant_catid[int(cat_id)].add(int(descendant_catid)) + self.ancestor2descendant_catid = ancestor2descendant_catid + + p = self.params + if p.useCats: + gts = [] + dts = [] + img_ids = set(p.imgIds) + cat_ids = set(p.catIds) + for gt in self.cocoGt.dataset['annotations']: + if (gt['category_id'] in cat_ids) and (gt['image_id'] + in img_ids): + gts.append(gt) + for dt in self.cocoDt.dataset['annotations']: + if (dt['category_id'] in cat_ids) and (dt['image_id'] + in img_ids): + dts.append(dt) + else: + gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds)) + dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds)) + + # convert ground truth to mask if iouType == 'segm' + if p.iouType == 'segm': + _toMask(gts, self.cocoGt) + _toMask(dts, self.cocoDt) + # set ignore flag + for gt in gts: + gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0 + gt['ignore'] = 'iscrowd' in gt and gt['iscrowd'] + if p.iouType == 'keypoints': + gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore'] + self._gts = defaultdict(list) # gt for evaluation + self._dts = defaultdict(list) # dt for evaluation + for gt in gts: + self._gts[gt['image_id'], gt['category_id']].append(gt) + for dt in dts: + self._dts[dt['image_id'], dt['category_id']].append(dt) + + # If a gt has child category cat_A, and dts of this image has this category, add this gt to gt + for gt in gts: + ignore_cats = [] + for child_cat_id in self.ancestor2descendant_catid[gt['category_id']]: + if len(self._dts[gt['image_id'], child_cat_id]) > 0: + ignore_cats.append(child_cat_id) + if len(ignore_cats) == 0: + continue + ignore_gt = copy.deepcopy(gt) + ignore_gt['category_id'] = ignore_cats + ignore_gt['ignore'] = 1 
+ for child_cat_id in ignore_cats: + self._gts[gt['image_id'], child_cat_id].append(ignore_gt) + + self.evalImgs = defaultdict( + list) # per-image per-category evaluation results + self.eval = {} # accumulated evaluation results + + def evaluate(self): + """Run per image evaluation on given images and store results (a list + of dict) in self.evalImgs. + + :return: None + """ + tic = time.time() + print('Running per image evaluation...') + p = self.params + # add backward compatibility if useSegm is specified in params + if p.useSegm is not None: + p.iouType = 'segm' if p.useSegm == 1 else 'bbox' + print('useSegm (deprecated) is not None. Running {} evaluation'. + format(p.iouType)) + print('Evaluate annotation type *{}*'.format(p.iouType)) + p.imgIds = list(np.unique(p.imgIds)) + if p.useCats: + p.catIds = list(np.unique(p.catIds)) + p.maxDets = sorted(p.maxDets) + self.params = p + + # loop through images, area range, max detection number + catIds = p.catIds if p.useCats else [-1] + + nproc = 8 + split_size = len(catIds) // nproc + mp_params = [] + for i in range(nproc): + begin = i * split_size + end = (i + 1) * split_size + if i == nproc - 1: + end = len(catIds) + mp_params.append((catIds[begin:end], )) + + MMLogger.get_current_instance().info( + f'start multi processing evaluation with nproc: {nproc}...') + with mp.Pool(nproc) as pool: + self.evalImgs = pool.starmap(self._evaluateImg, mp_params) + + self.evalImgs = list(itertools.chain(*self.evalImgs)) + + self._paramsEval = copy.deepcopy(self.params) + toc = time.time() + print('DONE (t={:0.2f}s).'.format(toc - tic)) + + def _evaluateImg(self, catids_chunk): + self._prepare() + p = self.params + maxDet = max(p.maxDets) + all_params = itertools.product(catids_chunk, p.areaRng, p.imgIds) + all_params_len = len(catids_chunk) * len(p.areaRng) * len(p.imgIds) + evalImgs = [ + self.evaluateImg(imgId, catId, areaRng, maxDet) + for catId, areaRng, imgId in tqdm(all_params, total=all_params_len) + ] + return evalImgs + + def evaluateImg(self, imgId, catId, aRng, maxDet): + p = self.params + if p.useCats: + gt = self._gts[imgId, catId] + dt = self._dts[imgId, catId] + else: + gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]] + dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]] + if len(gt) == 0 and len(dt) == 0: + return None + + for g in gt: + if g['ignore'] or (g['area'] < aRng[0] or g['area'] > aRng[1]): + g['_ignore'] = 1 + else: + g['_ignore'] = 0 + + # sort dt highest score first, sort gt ignore last + gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort') + gt = [gt[i] for i in gtind] + dtind = np.argsort([-d['score'] for d in dt], kind='mergesort') + dt = [dt[i] for i in dtind[0:maxDet]] + iscrowd = [int(o['iscrowd']) for o in gt] + # load computed ious + # ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId] # noqa + ious = self.computeIoU(imgId, catId) + ious = ious[:, gtind] if len(ious) > 0 else ious + + T = len(p.iouThrs) + G = len(gt) + D = len(dt) + gtm = np.zeros((T, G)) + dtm = np.zeros((T, D)) + gtIg = np.array([g['_ignore'] for g in gt]) + dtIg = np.zeros((T, D)) + if not len(ious) == 0: + for tind, t in enumerate(p.iouThrs): + for dind, d in enumerate(dt): + # information about best match so far (m=-1 -> unmatched) + iou = min([t, 1 - 1e-10]) + m = -1 + for gind, g in enumerate(gt): + # if this gt already matched, and not a crowd, continue + if gtm[tind, gind] > 0 and not iscrowd[gind]: + continue + # if dt matched to reg gt, and on ignore gt, 
stop + if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1: + break + # continue to next gt unless better match made + if ious[dind, gind] < iou: + continue + # if match successful and best so far, + # store appropriately + iou = ious[dind, gind] + m = gind + # if match made store id of match for both dt and gt + if m == -1: + continue + dtIg[tind, dind] = gtIg[m] + dtm[tind, dind] = gt[m]['id'] + gtm[tind, m] = d['id'] + # set unmatched detections outside of area range to ignore + a = np.array([d['area'] < aRng[0] or d['area'] > aRng[1] + for d in dt]).reshape((1, len(dt))) + dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, + 0))) + # store results for given image and category + + return { + 'image_id': imgId, + 'category_id': catId, + 'aRng': aRng, + 'maxDet': maxDet, + 'dtIds': [d['id'] for d in dt], + 'gtIds': [g['id'] for g in gt], + 'dtMatches': dtm, + 'gtMatches': gtm, + 'dtScores': [d['score'] for d in dt], + 'gtIgnore': gtIg, + 'dtIgnore': dtIg, + } + + def summarize(self): + """Compute and display summary metrics for evaluation results. + + Note this function can *only* be applied on the default parameter + setting + """ + + def _summarize(ap=1, iouThr=None, areaRng='all', maxDets=100): + p = self.params + iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}' # noqa + titleStr = 'Average Precision' if ap == 1 else 'Average Recall' + typeStr = '(AP)' if ap == 1 else '(AR)' + iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \ + if iouThr is None else '{:0.2f}'.format(iouThr) + + aind = [ + i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng + ] + mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] + if ap == 1: + # dimension of precision: [TxRxKxAxM] + s = self.eval['precision'] + # IoU + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, :, aind, mind] + else: + # dimension of recall: [TxKxAxM] + s = self.eval['recall'] + if iouThr is not None: + t = np.where(iouThr == p.iouThrs)[0] + s = s[t] + s = s[:, :, aind, mind] + if len(s[s > -1]) == 0: + mean_s = -1 + else: + mean_s = np.mean(s[s > -1]) + print( + iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, + mean_s)) + return mean_s + + def _summarizeDets(): + stats = [] + stats.append(_summarize(1, maxDets=self.params.maxDets[-1])) + stats.append( + _summarize(1, iouThr=.5, maxDets=self.params.maxDets[-1])) + stats.append( + _summarize(1, iouThr=.75, maxDets=self.params.maxDets[-1])) + for area_rng in ('small', 'medium', 'large'): + stats.append( + _summarize( + 1, areaRng=area_rng, maxDets=self.params.maxDets[-1])) + for max_det in self.params.maxDets: + stats.append(_summarize(0, maxDets=max_det)) + for area_rng in ('small', 'medium', 'large'): + stats.append( + _summarize( + 0, areaRng=area_rng, maxDets=self.params.maxDets[-1])) + stats = np.array(stats) + return stats + + def _summarizeKps(): + stats = np.zeros((10, )) + stats[0] = _summarize(1, maxDets=20) + stats[1] = _summarize(1, maxDets=20, iouThr=.5) + stats[2] = _summarize(1, maxDets=20, iouThr=.75) + stats[3] = _summarize(1, maxDets=20, areaRng='medium') + stats[4] = _summarize(1, maxDets=20, areaRng='large') + stats[5] = _summarize(0, maxDets=20) + stats[6] = _summarize(0, maxDets=20, iouThr=.5) + stats[7] = _summarize(0, maxDets=20, iouThr=.75) + stats[8] = _summarize(0, maxDets=20, areaRng='medium') + stats[9] = _summarize(0, maxDets=20, areaRng='large') + return stats + + if not self.eval: + raise Exception('Please run accumulate() first') + iouType = 
self.params.iouType + if iouType == 'segm' or iouType == 'bbox': + summarize = _summarizeDets + elif iouType == 'keypoints': + summarize = _summarizeKps + self.stats = summarize() \ No newline at end of file diff --git a/evaluation_script/main.py b/evaluation_script/main.py index 61c73d9b5..1eccfc772 100644 --- a/evaluation_script/main.py +++ b/evaluation_script/main.py @@ -1,5 +1,6 @@ import random - +from pycocotools.coco import COCO +from evaluation_script.cocoeval_mp import COCOevalMP def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwargs): print("Starting Evaluation.....") @@ -39,43 +40,53 @@ def evaluate(test_annotation_file, user_submission_file, phase_codename, **kwarg 'submitted_at': u'2017-03-20T19:22:03.880652Z' } """ - output = {} - if phase_codename == "dev": - print("Evaluating for Dev Phase") - output["result"] = [ - { - "train_split": { - "Metric1": random.randint(0, 99), - "Metric2": random.randint(0, 99), - "Metric3": random.randint(0, 99), - "Total": random.randint(0, 99), - } - } - ] - # To display the results in the result file - output["submission_result"] = output["result"][0]["train_split"] - print("Completed evaluation for Dev Phase") - elif phase_codename == "test": - print("Evaluating for Test Phase") - output["result"] = [ - { - "train_split": { - "Metric1": random.randint(0, 99), - "Metric2": random.randint(0, 99), - "Metric3": random.randint(0, 99), - "Total": random.randint(0, 99), - } - }, - { - "test_split": { - "Metric1": random.randint(0, 99), - "Metric2": random.randint(0, 99), - "Metric3": random.randint(0, 99), - "Total": random.randint(0, 99), - } - }, - ] - # To display the results in the result file - output["submission_result"] = output["result"][0] - print("Completed evaluation for Test Phase") + v3det_gt = COCO(test_annotation_file) # gt annotation file + v3det_dt = v3det_gt.loadRes(user_submission_file) # coco-format det results + v3det_eval = COCOevalMP(v3det_gt, v3det_dt, 'bbox', num_proc=8) + v3det_eval.params.maxDets = [300] + + v3det_eval.evaluate() + v3det_eval.accumulate() + v3det_eval.summarize() + # output = {} + # if phase_codename == "dev": + # print("Evaluating for Dev Phase") + # output["result"] = [ + # { + # "train_split": { + # "Metric1": random.randint(0, 99), + # "Metric2": random.randint(0, 99), + # "Metric3": random.randint(0, 99), + # "Total": random.randint(0, 99), + # } + # } + # ] + # # To display the results in the result file + # output["submission_result"] = output["result"][0]["train_split"] + # print("Completed evaluation for Dev Phase") + # elif phase_codename == "test": + # print("Evaluating for Test Phase") + # output["result"] = [ + # { + # "train_split": { + # "Metric1": random.randint(0, 99), + # "Metric2": random.randint(0, 99), + # "Metric3": random.randint(0, 99), + # "Total": random.randint(0, 99), + # } + # }, + # { + # "test_split": { + # "Metric1": random.randint(0, 99), + # "Metric2": random.randint(0, 99), + # "Metric3": random.randint(0, 99), + # "Total": random.randint(0, 99), + # } + # }, + # ] + # # To display the results in the result file + # output["submission_result"] = output["result"][0] + # print("Completed evaluation for Test Phase") + output = dict() + output['result'] = "empty" return output diff --git a/github/host_config.json b/github/host_config.json index e7ead6158..88d40bd71 100644 --- a/github/host_config.json +++ b/github/host_config.json @@ -1,5 +1,5 @@ { - "token": "", - "team_pk": "", - "evalai_host_url": "" + "token": 
"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ0b2tlbl90eXBlIjoicmVmcmVzaCIsImV4cCI6MTc0MjI4ODIxMCwianRpIjoiM2VkNzU1ZGFlYzMxNDAxOGFmN2UyOTkwYmJlMTk1MWQiLCJ1c2VyX2lkIjo0MTgxNH0.Iofvc2KanYyTEOpxxyQco0awXYeLDtik8sFH7n-4OHU", + "team_pk": "3194", + "evalai_host_url": "https://eval.ai" } diff --git a/templates/V3Det.png b/templates/V3Det.png new file mode 100644 index 000000000..e97d5f4bc Binary files /dev/null and b/templates/V3Det.png differ diff --git a/templates/challenge_phase_1_description.html b/templates/challenge_phase_1_description.html index 98907f595..70aab2c57 100755 --- a/templates/challenge_phase_1_description.html +++ b/templates/challenge_phase_1_description.html @@ -1 +1 @@ -

"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?"

\ No newline at end of file +

First is the Development (Dev) Phase, where contestants craft their own approaches and apply them to the V3Det dataset. This stage is all about experimentation and innovation. Participants are required to test their methods on V3Det and submit their results for evaluation. Precision is key: results will be assessed for accuracy and then showcased on the leaderboard, providing a real-time ranking of performance. All test outcomes should be uploaded in JSON format, ensuring a standardized and efficient evaluation process.
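For reference, a minimal sketch of the submission file, assuming the standard COCO detection-results layout that pycocotools' COCO.loadRes (used by the evaluation script) accepts; the image_id, category_id, and box values below are placeholders:

import json

# One dict per predicted box, in the COCO results format consumed by COCO.loadRes():
# "bbox" is [x, y, width, height] in pixels, "score" is the detection confidence.
results = [
    {
        "image_id": 1,        # placeholder image id from the test split
        "category_id": 1204,  # placeholder category id (V3Det has 13,204 categories)
        "bbox": [100.0, 50.0, 200.0, 150.0],
        "score": 0.87,
    },
    # ... one entry per detection; evaluation keeps at most 300 detections per image
]

with open("submission.json", "w") as f:
    json.dump(results, f)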

diff --git a/templates/challenge_phase_2_description.html b/templates/challenge_phase_2_description.html index 7de79f9a2..a1a697c66 100755 --- a/templates/challenge_phase_2_description.html +++ b/templates/challenge_phase_2_description.html @@ -1 +1 @@ -"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?" \ No newline at end of file +In this critical stage, competitors have one week to submit a detailed technical report. This report should encompass their methodological and data specifics. While including ablation studies is recommended to showcase the effectiveness of different components, it is not mandatory. This phase offers participants the chance to demonstrate the depth and robustness of their methods. The thoroughness of submissions is vital as they provide key insights into the strategies behind their results. diff --git a/templates/description.html b/templates/description.html index 2ee4109f9..2ebafa454 100755 --- a/templates/description.html +++ b/templates/description.html @@ -1,3 +1,25 @@ -

"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?"

+

V3Det: Vast Vocabulary Visual Detection Dataset

+

😊 Dataset | πŸ’» GitHub | πŸ“– Paper | πŸ“„ arXiv

+ +

This challenge provides an automated submission and evaluation system, along with a leaderboard, where participants can submit their model's results on the test set.

+ +

Competition Introduction

+

The V3Det Challenge 2024 - Vast Vocabulary Visual Detection invites visual recognition enthusiasts and experts worldwide to reshape the field of object detection using our revolutionary V3Det dataset. This dataset not only encompasses objects from 13,204 categories, ten times the size of existing large vocabulary object detection datasets, but also emphasizes the hierarchical and interrelated nature of categories, providing an ideal testbed for research in extensive and open vocabulary object detection. The rich annotations of V3Det, meticulously provided by human experts, ensure high precision and in-depth interpretation of the data. By participating in this challenge, you will not only have the opportunity to showcase your technical prowess but also contribute to the future development of the field of object detection.

+ +

Dataset Overview

+

We introduce V3Det, a vast vocabulary visual detection dataset meticulously crafted to advance the field of general visual object detection. This dataset addresses the challenge of detecting arbitrary objects in real-world scenarios, where traditional object detection datasets are often limited by their relatively restricted vocabulary.

+ +

V3Det stands out with its extensive range of features:

+ +

Vast Vocabulary: It encompasses bounding boxes for objects across 13,204 categories in real-world images. This scale is ten times larger than any existing large vocabulary object detection dataset, like LVIS, setting a new benchmark in dataset size and diversity.

+ +

Hierarchical Category Organization: The categories in V3Det are thoughtfully arranged in a hierarchical category tree. This structure not only delineates the inclusion relationships among different categories but also fosters an exploration into the complex interconnections and relationships within vast and open vocabulary object detection scenarios.

+ +

Rich Annotations: The dataset includes 243k images, each precisely annotated with bounding boxes to ensure accurate object identification and localization. Furthermore, V3Det offers professional descriptions for each category, crafted by human experts. This level of detail aids significantly in the interpretation and understanding of the dataset.

+ +V3Det + +

V3Det is more than just a dataset; it’s an exploration space that allows for comprehensive benchmarks in both vast and open vocabulary object detection. It’s poised to yield new observations, practices, and insights, thereby contributing to future research in the field. With its expansive scope and detailed annotations, V3Det is uniquely positioned to be a cornerstone dataset for the development of more generalized visual perception systems.

+ + -

"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?"

diff --git a/templates/evaluation_details.html b/templates/evaluation_details.html index 14bf424cf..adca7a63c 100755 --- a/templates/evaluation_details.html +++ b/templates/evaluation_details.html @@ -1 +1 @@ -

"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."

+

"Our test consists of two segments: supervised learning and open vocabulary detection. We will evaluate the outcomes in these two different settings and display them on the leaderboard."

diff --git a/templates/terms_and_conditions.html b/templates/terms_and_conditions.html index 12e9f60ab..98bcb09e3 100755 --- a/templates/terms_and_conditions.html +++ b/templates/terms_and_conditions.html @@ -1 +1 @@ -

Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.

+