-
Notifications
You must be signed in to change notification settings - Fork 0
/
index.html
869 lines (785 loc) · 62.8 KB
/
index.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
<!DOCTYPE html>
<!--[if lt IE 7 ]><html class="ie ie6" lang="en"> <![endif]-->
<!--[if IE 7 ]><html class="ie ie7" lang="en"> <![endif]-->
<!--[if IE 8 ]><html class="ie ie8" lang="en"> <![endif]-->
<!--[if (gte IE 9)|!(IE)]><!--><html lang="en"> <!--<![endif]-->
<head>
<!-- Basic Page Needs
================================================== -->
<meta charset="utf-8">
<title>BED4RS</title>
<meta name="description" content="">
<meta name="author" content="">
<!--[if lt IE 9]>
<script src="https://html5shim.googlecode.com/svn/trunk/html5.js"></script>
<![endif]-->
<!-- Mobile Specific Metas
================================================== -->
<!-- Do not set maximum-scale/user-scalable: blocking pinch-zoom fails WCAG 1.4.4 -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- CSS
================================================== -->
<link rel="stylesheet" href="css/base.css">
<link rel="stylesheet" href="css/skeleton.css">
<link rel="stylesheet" href="css/screen.css">
<!--[if IE 7]>
<link rel="stylesheet" href="stylesheets/ie7.css">
<![endif]-->
<!-- Favicons
================================================== -->
<link rel="shortcut icon" href="images/captain-icon.png">
<link rel="apple-touch-icon" href="images/captain-icon.png">
<link rel="apple-touch-icon" sizes="72x72" href="images/captain-icon-72x72.png">
<link rel="apple-touch-icon" sizes="114x114" href="images/captain-icon-114x114.png">
<!-- Fonts
================================================== -->
<link href="https://fonts.googleapis.com/css?family=Bitter" rel="stylesheet">
<style>
li {font-size: 15px}
p {font-size: 15px}
/* Downward-pointing triangle built from borders (dropdown indicator) */
.arrow-bottom {
width: 0;
height: 0;
border-left: 4px solid transparent; /* left border width */
border-right: 4px solid transparent; /* right border width */
border-top: 5px solid rgb(125, 125, 125); /* arrow height and color */
font-size: 0;
line-height: 0;
}
</style>
</head>
<body>
<!-- Site Backgrounds
================================================== -->
<!-- Change to class="poswrapheaderline wide" and class="headerline full" for a full-width header line -->
<!-- <div class="poswrapheaderline"><div class="headerline"></div></div> -->
<!-- Remove or uncomment depending on if you want a background image or tile -->
<!-- <div class="tiledbackground"></div> -->
<!-- <img src="images/bg.png" alt="" id="background" /> -->
<!-- Change to class="poswrapper wide" and class="whitebackground full" for a full-width site background -->
<div class="poswrapper"><div class="whitebackground"></div></div>
<div class="container main portfolio6column">
<!-- Header | Logo, Menu
================================================== -->
<div class="logo sixteen columns" style="padding-top: 30px"><img style="width: 77%;height: 77%;margin-bottom: 5px;" src="images/head.svg" alt="" /></div>
<!-- Content Holder -->
<div class="seventeen columns offset-by-one row content center" style="padding-left: 0px; padding-top: 20px">
<!-- Content Tabs -->
<div class="seventeen columns header" style="padding: 0px;">
<div class="mainmenu seventeen columns">
<div id="mainmenu" class="ddsmoothmenu" style="width: twelve columns">
<ul class="tabs">
<li style="width: 120px; text-align:center"><a href="#concept" style="font-size: 13.5px; font-weight: bold; padding:0px 0px; color:rgb(50, 50, 50); ">Home</a></li>
<li style="width: 120px; text-align:center"><a style="font-size: 13.5px; font-weight: bold; padding:0px 15px">Datasets <span class="arrow-bottom"></span></a>
<ul class="tabs" style="width: 117px; margin-left: -1px;">
<li style="background-color:#fff" id="menu-dota"> <a style="width:auto; font-size: 13px" href="#description-dota">DOTA</a></li>
<li style="background-color:#fff" id="menu-millionaid"><a style="width:auto; font-size: 13px" href="#description-millionaid">Million-AID</a></li>
<li style="background-color:#fff" id="menu-aid"> <a style="width:auto; font-size: 13px" href="#description-aid">AID</a></li>
<li style="background-color:#fff" id="menu-whurs19"> <a style="width:auto; font-size: 13px" href="#description-whurs19">WHU-RS19</a></li>
<li style="background-color:#fff" id="menu-nasc-tg2"> <a style="width:auto; font-size: 13px" href="#description-nasc-tg2">NaSC-TG2</a></li>
<li style="background-color:#fff" id="menu-gid"> <a style="width:auto; font-size: 13px" href="#description-gid">GID</a></li>
<li style="background-color:#fff" id="menu-second"> <a style="width:auto; font-size: 13px" href="#description-second">SECOND</a></li>
<li style="background-color:#fff" id="menu-uavid"> <a style="width:auto; font-size: 13px" href="#description-uavid">UAVid</a></li>
<li style="background-color:#fff" id="menu-isaid"> <a style="width:auto; font-size: 13px" href="#description-isaid">iSAID</a></li>
</ul>
</li>
<li style="width: 120px; text-align:center"><a style="font-size: 13.5px; font-weight: bold; padding:0px 15px">Benchmarks <span class="arrow-bottom"></span></a>
<ul class="tabs" style="width: 117px; margin-left: -1px;">
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://captain-whu.github.io/DOTA/results_dota2.html" target="_blank">DOTA-v2.0</a></li>
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://captain-whu.github.io/DOTA/results_dota15.html" target="_blank">DOTA-v1.5</a></li>
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://captain-whu.github.io/DOTA/results.html" target="_blank">DOTA-v1.0</a></li>
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://captain-whu.github.io/iSAID/results.html" target="_blank">iSAID</a></li>
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://competitions.codalab.org/competitions/25224#results" target="_blank">UAVid</a></li>
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://captain-whu.github.io/DiRS/#downeva" target="_blank">Million-AID</a></li>
</ul>
</li>
<li style="width: 120px; text-align:center"><a style="font-size: 13.5px; font-weight: bold; padding:0px 15px">Challenges <span class="arrow-bottom"></span></a>
<ul class="tabs" style="width: 117px; margin-left: -1px;">
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://captain-whu.github.io/LUAI2021/challenge.html" target="_blank">LUAI-ICCV'2021</a></li> <!-- ICCV'2021-->
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="" target="_blank">IIAI-PRCV'2021</a></li> <!-- PRCV'2021-->
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://captain-whu.github.io/DOAI2019/challenge.html" target="_blank">ODAI-CVPR'2019</a></li> <!-- CVPR'2019-->
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://autdatamotion.github.io/RSC2019/#/home" target="_blank">SRIASRI 2019</a></li> <!-- 稀疏表征与智能分析竞赛 2019-->
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://captain-whu.github.io/ODAI/" target="_blank">ODAI-ICPR'2018</a></li> <!-- ICPR'2018-->
</ul>
</li>
<li style="width: 120px; text-align:center"><a style="font-size: 13.5px; font-weight: bold; padding:0px 15px">Workshops <span class="arrow-bottom"></span></a>
<ul class="tabs" style="width: 117px; margin-left: -1px;">
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://captain-whu.github.io/LUAI2021/" target="_blank">LUAI ICCV'2021</a></li>
<li style="background-color:#fff"><a style="width:auto; font-size: 13px" href="https://captain-whu.github.io/DOAI2019/index.html" target="_blank">ODAI CVPR'2019</a></li> <!-- DOAI2019 -->
</ul>
</li>
<li style="width: 120px; text-align:center"><a href="#support" style="font-size: 13.5px; font-weight: bold; padding:0px 0px;">Contact</a></li>
</ul>
<br style="clear: left" />
</div>
</div>
</div>
<ul class="tabs-content clearfix">
<li class="active clearfix" id="concept" style="padding-top: 20px">
<div class="seventeen columns" style="padding-top: 1px;width: 99%;">
<h4>Latest News</h4>
<div class="widget_portfolio">
<ul style="list-style-type: disc;">
<li class="clearfix"><span class="borderhover" style="text-indent: 1em"> <strong>2021-05-12</strong> A workshop and challenge on <a href="https://captain-whu.github.io/LUAI2021/challenge.html" target="_blank" style="font-style: italic; text-decoration: underline"> Learning to Understand Aerial Images (LUAI)</a> will be held in conjunction with <a href="http://iccv2021.thecvf.com/home" style="font-style: italic; text-decoration: underline" target="_blank">IEEE ICCV 2021</a>!</span></li>
<li class="clearfix"><span class="borderhover" style="text-indent: 1em"> <strong>2021-04-30</strong> A challenge on <a href="#" target="_blank" style="font-style: italic; text-decoration: underline">Intelligent Interpretation of Aerial Images (IIAI)</a> in conjunction with <a href="http://www.prcv.cn/index_en.html" style="font-style: italic; text-decoration: underline" target="_blank"> PRCV 2021</a> will be held!</span></li>
<li class="clearfix"><span class="borderhover" style="text-indent: 1em"> <strong>2021-02-05</strong> A new benchmark DOTA-v2.0, including dataset, code library, and 70 baselines, is released.</span></li>
<li class="clearfix"><span class="borderhover" style="text-indent: 1em"> <strong>2020-07-20</strong> UAVid'2020, a dataset for UAV Video Semantic Segmentation, is now available online.</span></li>
</ul>
<div class="clear"></div>
</div>
</div>
<div class="seventeen columns row divide notop" style="padding-top: 20px;width: 99%">
<h4 class="titledivider" style="background: #FFFFFF">Datasets</h4>
<div class="dividerline ten columns" style="width: 98%"></div>
</div>
<div class="seventeen columns row teasers portfolio" style="width: 99%">
<div class="one_third teaser all-group web-group">
<a href="https://captain-whu.github.io/DOTA/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-thumbs/DOTA.png" alt="" class="scale-with-grid" /></a>
<div class="pluswrap" style="padding-right: 0px;">
<!-- <a href="#" class="bigplus"></a> -->
<div class="topline"><a href="#">DOTA</a></div>
<div class="subline" style="width: 265px">Aerial Oriented Object Detection </div>
<div class="subline" style="width: 265px">1.8 million instances, 11268 images, 18 classes. </div>
</div>
</div>
<div class="one_third teaser all-group photoshop-group">
<a href="https://captain-whu.github.io/DiRS/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-thumbs/Million-AID.png" alt="" class="scale-with-grid" /></a>
<div class="pluswrap" style="padding-right: 0px;">
<!-- <a href="#" class="bigplus"></a> -->
<div class="topline"><a href="#">Million-AID</a></div>
<div class="subline" style="width: 265px">Aerial Scene Classification</div>
<div class="subline" style="width: 265px">1 million images (100<sup style="line-height: 2px;vertical-align: top;">2</sup>~ 30000<sup style="line-height: 2px;vertical-align: top;">2</sup>), 51 classes.</div>
</div>
</div>
<div class="one_third lastcolumn teaser all-group concepts-group">
<a href="https://captain-whu.github.io/AID/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-thumbs/AID.png" alt="" class="scale-with-grid" /></a>
<div class="pluswrap" style="padding-right: 0px;">
<!-- <a href="#" class="bigplus"></a> -->
<div class="topline"><a href="#">AID</a></div>
<div class="subline" style="width: 265px">Aerial Scene Classification</div>
<div class="subline" style="width: 265px">10k images (600 * 600), 30 classes.</div>
</div>
</div>
<div class="one_third teaser all-group web-group">
<ul class="tabs" style="margin: 0px 0px; padding: 0px 0px; border:0px">
<li style="margin: 0px 0px; padding: 0px 0px; border:0px;width: auto; height: auto;">
<a href="#description-whurs19" target="_blank" data-text="» Visit Project" class="hovering" style="margin: 0px 0px; padding: 0px 0px; border:0px;width: auto; height: auto;"><img src="images/dataset-thumbs/WHU-RS19.png" alt="" class="scale-with-grid" /></a>
<div class="pluswrap" style="padding-right: 0px;">
<!-- <a href="#" class="bigplus"></a> -->
<div class="topline"><a href="#" style="margin:0px 0px;padding: 0px 0px; border: 0; height: auto; line-height: 12px;">WHU-RS19</a></div>
<div class="subline" style="width: 265px">Aerial Scene Classification</div>
<div class="subline" style="width: 265px">1005 images (600 * 600), 19 classes.</div>
</div>
</li>
</ul>
</div>
<div class="one_third teaser all-group photoshop-group">
<ul class="tabs" style="margin: 0px 0px; padding: 0px 0px; border:0px">
<li style="margin: 0px 0px; padding: 0px 0px; border:0px;width: auto; height: auto;">
<a href="#description-nasc-tg2" target="_blank" data-text="» Visit Project" class="hovering" style="margin: 0px 0px; padding: 0px 0px; border:0px;width: auto; height: auto;"><img src="images/dataset-thumbs/NaSC-TG2.png" alt="" class="scale-with-grid" /></a>
<div class="pluswrap" style="padding-right: 0px;">
<!-- <a href="#" class="bigplus"></a> -->
<div class="topline"><a href="#" style="margin:0px 0px;padding: 0px 0px; border: 0; height: auto; line-height: 12px;">NaSC-TG2</a></div>
<div class="subline" style="width: 265px">Aerial Scene Classification</div>
<div class="subline" style="width: 265px">20000 images (128 * 128), 10 classes.</div>
</div>
</li>
</ul>
</div>
<div class="one_third lastcolumn teaser all-group concepts-group">
<a href="https://captain-whu.github.io/GID/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-thumbs/GID.png" alt="" class="scale-with-grid" /></a>
<div class="pluswrap" style="padding-right: 0px;">
<!-- <a href="#" class="bigplus"></a> -->
<div class="topline"><a href="#">GID</a></div>
<div class="subline" style="width: 265px">Land Use Classification</div>
<div class="subline" style="width: 265px">150 images (6800 * 7200), 15 classes. </div>
</div>
</div>
<div class="one_third teaser all-group web-group">
<a href="https://captain-whu.github.io/SCD/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-thumbs/SECOND.png" alt="" class="scale-with-grid" /></a>
<div class="pluswrap" style="padding-right: 0px;">
<!-- <a href="#" class="bigplus"></a> -->
<div class="topline"><a href="#">SECOND</a></div>
<div class="subline" style="width: 265px">Aerial Change Detection</div>
<div class="subline" style="width: 265px">4662 image pairs (512 * 512), 30 classes.</div>
</div>
</div>
<div class="one_third teaser all-group photoshop-group">
<a href="https://www.uavid.nl/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-thumbs/UAVid.png" alt="" class="scale-with-grid" /></a>
<div class="pluswrap" style="padding-right: 0px;">
<!-- <a href="#" class="bigplus"></a> -->
<div class="topline"><a href="#">UAVid</a></div>
<div class="subline" style="width: 265px">UAV Video Semantic Segmentation</div>
<div class="subline" style="width: 265px">42 video sequences (~4000 * 2160), 8 classes.</div>
</div>
</div>
<div class="one_third lastcolumn teaser all-group concepts-group">
<a href="https://captain-whu.github.io/iSAID/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-thumbs/iSAID.png" alt="" class="scale-with-grid" /></a>
<div class="pluswrap" style="padding-right: 0px;">
<!-- <a href="#" class="bigplus"></a> -->
<div class="topline"><a href="#">iSAID</a></div>
<div class="subline" style="width: 265px">Aerial Image Instance Segmentation</div>
<div class="subline" style="width: 265px">655451 instances, 2806 images, 15 classes.</div>
</div>
</div>
</div>
</li>
<li id="description-dota" class="clearfix" style="padding-top: 20px">
<p style="text-align: justify;"><a href="https://captain-whu.github.io/DOTA/" target="_blank" class="hovering"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">DOTA</strong></a> is a large-scale dataset for object detection in aerial images. It can be used to develop and evaluate object detectors in aerial images. The images are collected from different sensors and platforms. Each image is of the size in the range from 800 × 800 to 20,000 × 20,000 pixels and contains objects exhibiting a wide variety of scales, orientations, and shapes. The instances in DOTA images are annotated by experts in aerial image interpretation by arbitrary (8 d.o.f.) quadrilateral. We will continue to update DOTA, to grow in size and scope to reflect evolving real-world conditions. Now it has three versions:</p>
<div class="widget_portfolio" style="text-align: justify;">
<ul style="list-style-type: disc;">
<li><a href="https://captain-whu.github.io/DOTA/dataset.html" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">DOTA-v1.0</strong></a> contains 15 common categories, 2,806 images and 188,282 instances. The proportions of the training set, validation set, and testing set in DOTA-v1.0 are 1/2, 1/6, and 1/3, respectively.</li>
<li><a href="https://captain-whu.github.io/DOTA/dataset.html" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">DOTA-v1.5</strong></a> uses the same images as DOTA-v1.0, but the extremely small instances (less than 10 pixels) are also annotated. Moreover, a new category, “container crane” is added. It contains 403,318 instances in total. The number of images and dataset splits are the same as DOTA-v1.0. This version was released for the DOAI Challenge 2019 on Object Detection in Aerial Images in conjunction with IEEE CVPR 2019.</li>
<li><a href="https://captain-whu.github.io/DOTA/dataset.html" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">DOTA-v2.0</strong></a> collects more Google Earth, GF-2 Satellite, and aerial images. There are 18 common categories, 11,268 images and 1,793,658 instances in DOTA-v2.0. Compared to DOTA-v1.5, it further adds the new categories of “airport” and “helipad”. The 11,268 images of DOTA are split into training, validation, test-dev, and test-challenge sets. To avoid the problem of overfitting, the proportion of training and validation set is smaller than the test set. Furthermore, we have two test sets, namely test-dev and test-challenge. Training contains 1,830 images and 268,627 instances. Validation contains 593 images and 81,048 instances. We released the images and ground truths for training and validation sets. Test-dev contains 2,792 images and 353,346 instances. We released the images but not the ground truths. Test-challenge contains 6,053 images and 1,090,637 instances. The images and ground truths of test-challenge will be available only during the challenge.</li>
</ul>
<div class="clear"></div>
</div>
<div class="clearfix" align="center">
<a href="https://captain-whu.github.io/DOTA/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-images/DOTA.jpg" alt="" class="scale-with-grid" /></a>
</div>
<div style="padding-top: 30px">
<h5>Data Download</h5>
<ul style="list-style-type: disc;">
<li>
<p>Download
<a href="https://captain-whu.github.io/DOTA/dataset.html" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">DOTA-v1.0</strong></a>,
<a href="https://captain-whu.github.io/DOTA/dataset.html" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">DOTA-v1.5</strong></a>,
<a href="https://captain-whu.github.io/DOTA/dataset.html" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">DOTA-v2.0</strong></a>
</p>
</li>
</ul>
</div>
<div style="padding-top: 10px">
<h5>Citation</h5>
<p style="margin-bottom: 10px">If you make use of DOTA, please cite our following works:</p>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@misc{ding2021object, <br>
title={Object Detection in Aerial Images: A Large-Scale Benchmark and Challenges}, <br>
author={Jian Ding and Nan Xue and Gui-Song Xia and Xiang Bai and Wen Yang and Micheal Ying Yang and Serge Belongie and Jiebo Luo and Mihai Datcu and Marcello Pelillo and Liangpei Zhang}, <br>
year={2021}, <br>
eprint={2102.12219}, <br>
archivePrefix={arXiv}, <br>
primaryClass={cs.CV} <br>
}
</p>
</div>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@InProceedings{Xia_2018_CVPR, <br>
title={DOTA: A Large-Scale Dataset for Object Detection in Aerial Images}, <br>
author={Gui-Song Xia and Xiang Bai and Jian Ding and Zhen Zhu and Serge Belongie and Jiebo Luo and Mihai Datcu and Marcello Pelillo and Liangpei Zhang}, <br>
booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, <br>
month={June}, <br>
year={2018}
}
</p>
</div>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@InProceedings{Ding_2019_CVPR, <br>
title={Learning RoI Transformer for Detecting Oriented Objects in Aerial Images}, <br>
author={Jian Ding and Nan Xue and Yang Long and Gui-Song Xia and Qikai Lu}, <br>
booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, <br>
month={June}, <br>
year={2019} <br>
}
</p>
</div>
</div>
<div style="padding-top: 10px">
<h5>Contact</h5>
<p style="font-size: 15px; line-height: 21px; margin-bottom:10px;">If you have any problems or feedback when using DOTA, please contact</p>
<ul style="list-style-type: disc;">
<li><strong>Jian Ding</strong> at: <strong>[email protected]</strong></li>
<li><strong>Gui-Song Xia</strong> at: <strong>[email protected]</strong></li>
</ul>
<div class="clear"></div>
</div>
</li>
<li id="description-millionaid" class="clearfix" style="padding-top: 20px">
<p style="text-align: justify;"><a href="https://captain-whu.github.io/DiRS/" target="_blank" class="hovering"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">Million-AID</strong></a> is a new large-scale benchmark dataset containing a million instances for RS scene classification. There are 51 semantic scene categories in Million-AID. And the scene categories are customized to match the land-use classification standards, which greatly enhances the practicability of the constructed Million-AID. Different from the existing scene classification datasets of which categories are organized with parallel or uncertain relationships, scene categories in Million-AID are organized with systematic relationship architecture, giving it superiority in management and scalability. Specifically, the scene categories in Million-AID are organized by the hierarchical category network of a three-level tree: 51 leaf nodes fall into 28 parent nodes at the second level which are grouped into 8 nodes at the first level, representing the 8 underlying scene categories of agriculture land, commercial land, industrial land, public service land, residential land, transportation land, unutilized land, and water area. The scene category network provides the dataset with excellent organization of relationship among different scene categories and also the property of scalability. The number of images in each scene category ranges from 2,000 to 45,000, endowing the dataset with the property of long tail distribution. Besides, Million-AID has superiorities over the existing scene classification datasets owing to its high spatial resolution, large scale, and global distribution. </p>
<div class="clearfix" align="center">
<a href="https://captain-whu.github.io/DiRS/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-images/Million-AID.svg" alt="" class="scale-with-grid" /></a>
</div>
<div style="padding-top: 30px">
<h5>Data Download</h5>
<ul style="list-style-type: disc;">
<li>
<p>
<a href="https://captain-whu.github.io/DiRS/" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">Download Million-AID</strong></a>
</p>
</li>
</ul>
</div>
<div style="padding-top: 10px">
<h5>Citation</h5>
<p style="margin-bottom: 10px">If you make use of Million-AID, please cite our following work:</p>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@Article{Long2021DiRS, <br>
title={On Creating Benchmark Dataset for Aerial Image Interpretation: Reviews, Guidances and Million-AID}, <br>
author={Yang Long and Gui-Song Xia and Shengyang Li and Wen Yang and Michael Ying Yang and Xiao Xiang Zhu and Liangpei Zhang and Deren Li}, <br>
journal={IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, <br>
year={2021}, <br>
volume={14}, <br>
pages={4205-4230} <br>
}
</p>
</div>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@misc{Long2022ASP, <br>
title={Aerial Scene Parsing: From Tile-level Scene Classification to Pixel-wise Semantic Labeling}, <br>
author={Yang Long and Gui-Song Xia and Liangpei Zhang and Gong Cheng and Deren Li}, <br>
year={2022}, <br>
eprint={2201.01953}, <br>
archivePrefix={arXiv}, <br>
primaryClass={cs.CV} <br>
}
</p>
</div>
</div>
<div style="padding-top: 10px">
<h5>Contact</h5>
<p style="font-size: 15px; line-height: 21px; margin-bottom: 10px;">If you have any problems or feedback when using Million-AID, please contact</p>
<ul style="list-style-type: disc;">
<li><strong>Yang Long</strong> at: <strong>[email protected]</strong></li>
<li><strong>Gui-Song Xia</strong> at: <strong>[email protected]</strong></li>
</ul>
<div class="clear"></div>
</div>
</li>
<li id="description-aid" class="clearfix" style="padding-top: 20px">
<p style="text-align: justify;"><a href="https://captain-whu.github.io/AID/" target="_blank" class="hovering"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">AID</strong></a> is a large-scale aerial image dataset, by collecting sample images from Google Earth imagery. Note that although the Google Earth images are post-processed using RGB renderings from the original optical aerial images, it has proven that there is no significant difference between the Google Earth images with the real optical aerial images even in the pixel-level land use/cover mapping. Thus, the Google Earth images can also be used as aerial images for evaluating scene classification algorithms.</p>
<p style="text-align: justify;">The dataset is made up of the following 30 aerial scene types: airport, bare land, baseball field, beach, bridge, center, church, commercial, dense residential, desert, farmland, forest, industrial, meadow, medium residential, mountain, park, parking, playground, pond, port, railway station, resort, river, school, sparse residential, square, stadium, storage tanks and viaduct. All the images are labelled by the specialists in the field of remote sensing image interpretation, and some samples of each class are shown in Fig.1. In all, the AID dataset has a number of 10000 images within 30 classes.</p>
<p style="text-align: justify;">The images in AID are actually multi-source, as Google Earth images are from different remote imaging sensors. This brings more challenges for scene classification than the single source images like UC-Merced dataset. Moreover, all the sample images per each class in AID are carefully chosen from different countries and regions around the world, mainly in China, the United States, England, France, Italy, Japan, Germany, etc., and they are extracted at different time and seasons under different imaging conditions, which increases the intra-class diversities of the data. </p>
<div class="clearfix" align="center">
<a href="https://captain-whu.github.io/AID/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-images/AID.png" alt="" class="scale-with-grid" /></a>
</div>
<div style="padding-top: 30px">
<h5>Data Download</h5>
<ul style="list-style-type: disc;">
<li>
<p>
<a href="https://captain-whu.github.io/AID/" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">Download AID</strong></a>
</p>
</li>
</ul>
</div>
<div style="padding-top: 10px">
<h5>Citation</h5>
<p style="margin-bottom: 10px">If you make use of AID, please cite our following work:</p>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@Article{Xia2017AID, <br>
title={AID: A Benchmark Data Set for Performance Evaluation of Aerial Scene Classification}, <br>
author={Gui-Song Xia and Jingwen Hu and Fan Hu and Baoguang Shi and Xiang Bai and Yanfei Zhong and Liangpei Zhang and Xiaoqiang Lu}, <br>
journal={IEEE Transactions on Geoscience and Remote Sensing}, <br>
year={2017}, <br>
volume={55}, <br>
number={7}, <br>
pages={3965-3981} <br>
}
</p>
</div>
</div>
<div style="padding-top: 10px">
<h5>Contact</h5>
<p style="font-size: 15px; line-height: 21px; margin-bottom: 10px;">If you have any problems or feedback in using AID, please contact</p>
<ul style="list-style-type: disc;">
<li><strong>Jingwen Hu</strong> at: <strong>[email protected]</strong></li>
<li><strong>Gui-Song Xia</strong> at: <strong>[email protected]</strong></li>
</ul>
<div class="clear"></div>
</div>
</li>
<li id="description-whurs19" class="clearfix" style="padding-top: 20px">
<p style="text-align: justify;"><a href="" target="_blank" class="hovering"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">WHU-RS19</strong></a> is a set of satellite images exported from Google Earth, which provides high-resolution satellite images up to 0.5 m. Some samples of the database are displayed in the following picture. It contains 19 classes of meaningful scenes in high-resolution satellite imagery, including airport, beach, bridge, commercial, desert, farmland, footballfield, forest, industrial, meadow, mountain, park, parking, pond, port, railwaystation, residential, river, and viaduct. For each class, there are about 50 samples. It’s worth noticing that the image samples of the same class are collected from different regions in satellite images of different resolutions and then might have different scales, orientations and illuminations. </p>
<div class="clearfix" align="center">
<a href="" data-text="» Visit Project" class="hovering"><img src="images/dataset-images/WHU-RS19.svg" alt="" class="scale-with-grid" /></a>
</div>
<div style="padding-top: 30px">
<h5>Data Download</h5>
<ul style="list-style-type: disc;">
<li>
<p>
<a href="https://github.com/CAPTAIN-WHU/BED4RS/raw/main/datasets/WHU-RS19.zip" download="WHU-RS19.zip" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">Download WHU-RS19</strong></a>
</p>
</li>
</ul>
</div>
<div style="padding-top: 10px">
<h5>Citation</h5>
<p style="margin-bottom: 10px">If you make use of WHU-RS19, please cite our following work:</p>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@InProceedings{Xia2010WHURS19, <br>
title={Structural high-resolution satellite image indexing}, <br>
author={Gui-Song Xia and Wen Yang and Julie Delon and Yann Gousseau and Hong Sun and Henri Maître}, <br>
booktitle={Symposium: 100 Years ISPRS - Advancing Remote Sensing Science}, <br>
year={2010}, <br>
address={Vienna, Austria}, <br>
}
</p>
</div>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@Article{Dai2011WHURS19, <br>
title={Satellite Image Classification via Two-Layer Sparse Coding With Biased Image Representation}, <br>
author={Dengxin Dai and Wen Yang}, <br>
journal={IEEE Transactions on Geoscience and Remote Sensing}, <br>
year={2011}, <br>
volume={8}, <br>
number={1}, <br>
pages={173-176} <br>
}
</p>
</div>
</div>
</li>
<li id="description-nasc-tg2" class="clearfix" style="padding-top: 20px">
<p style="text-align: justify;"><a href="" target="_blank" class="hovering"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">NaSC-TG2</strong></a> (Natural Scene Classification with Tiangong-2 Remotely Sensed Imagery) is a novel benchmark dataset for remote sensing natural scene classification built from Tiangong-2 remotely sensed imagery. The goal of this dataset is to expand and enrich the annotation data for advancing remote sensing classification algorithms, especially for natural scene classification. The dataset contains 20,000 images, which are equally divided into 10 scene classes: beach, circle farmland, cloud, desert, forest, mountain, rectangle farmland, residential, river, and snowberg. Each scene includes 2,000 images with a size of 128×128 pixels and a spatial resolution of 100 m. Compared with other datasets collected from Google Earth, NaSC-TG2 has abundant natural scenes with novel spatial scale and imaging performance. In addition to true-color RGB images, the NaSC-TG2 dataset also covers the corresponding 14-band multi-spectral scene images, providing valuable experimental data for research on high-dimensional scene image classification algorithms.</p>
<div class="clearfix" align="center">
<a href="" data-text="» Visit Project" class="hovering"><img src="images/dataset-images/NaSC-TG2.png" alt="" class="scale-with-grid" /></a>
</div>
<div style="padding-top: 30px">
<h5>Data Download</h5>
<ul style="list-style-type: disc;">
<li>
<p>
<a href="http://www.msadc.cn/main/setsubDetail?id=1370312964720037889" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">Download NaSC-TG2</strong></a>
</p>
</li>
</ul>
</div>
<div style="padding-top: 10px">
<h5>Citation</h5>
<p style="margin-bottom: 10px">If you make use of NaSC-TG2, please cite our following work:</p>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@Article{Zhou2021NaSCTG2, <br>
title={NaSC-TG2: Natural Scene Classification With Tiangong-2 Remotely Sensed Imagery}, <br>
author={Zhuang Zhou and Shengyang Li and Wei Wu and Weilong Guo and Xuan Li and Guisong Xia and Zifei Zhao}, <br>
journal={IEEE Journal of Selected Topics in Applied Earth Observations and Remote Sensing}, <br>
year={2021}, <br>
volume={14}, <br>
pages={3228-3242} <br>
}
</p>
</div>
</div>
<div style="padding-top: 10px">
<h5>Contact</h5>
<p style="font-size: 15px; line-height: 21px; margin-bottom: 10px;">If you have any problems or feedback in using NaSC-TG2, please contact</p>
<ul style="list-style-type: disc;">
<li><strong>Zhuang Zhou</strong> at: <strong>[email protected]</strong></li>
<li><strong>Shengyang Li</strong> at: <strong>[email protected]</strong></li>
</ul>
<div class="clear"></div>
</div>
</li>
<li id="description-gid" class="clearfix" style="padding-top: 20px">
<p style="text-align: justify;"><a href="https://captain-whu.github.io/GID/" target="_blank" class="hovering"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">GID</strong></a> is a large-scale land-cover dataset with Gaofen-2 (GF-2) satellite images. This new dataset, which is named as Gaofen Image Dataset (GID), has superiorities over the existing land-cover datasets because of its large coverage, wide distribution, and high spatial resolution. GID consists of two parts: a large-scale classification set and a fine land-cover classification set. The large-scale classification set contains 150 pixel-level annotated GF-2 images, and the fine classification set is composed of 30,000 multi-scale image patches coupled with 10 pixel-level annotated GF-2 images. The training and validation data with 15 categories is collected and re-labeled based on the training and validation images with 5 categories, respectively. </p>
<div class="clearfix" align="center">
<a href="https://captain-whu.github.io/GID/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-images/GID.png" alt="" class="scale-with-grid" /></a>
</div>
<div style="padding-top: 30px;">
<h5>Data Download</h5>
<ul style="list-style-type: disc;">
<li>
<p>
<a href="https://captain-whu.github.io/GID/" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">Download GID</strong></a>
</p>
</li>
</ul>
</div>
<div style="padding-top: 10px">
<h5>Citation</h5>
<p style="margin-bottom: 10px">If you make use of GID, please cite our following work:</p>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@Article{Tong2020GID, <br>
title={Land-Cover Classification with High-Resolution Remote Sensing Images Using Transferable Deep Models}, <br>
author={Xin-Yi Tong and Gui-Song Xia and Qikai Lu and Huanfeng Shen and Shengyang Li and Shucheng You and Liangpei Zhang}, <br>
journal={Remote Sensing of Environment}, <br>
year={2020}, <br>
volume={237}, <br>
pages={111322} <br>
}
</p>
</div>
</div>
<div style="padding-top: 10px">
<h5>Contact</h5>
<p style="font-size: 15px; line-height: 21px; margin-bottom: 10px;">If you have any problems or feedback in using GID, please contact</p>
<ul style="list-style-type: disc;">
<li><strong>Xinyi Tong</strong> at: <strong>[email protected]</strong></li>
<li><strong>Gui-Song Xia</strong> at: <strong>[email protected]</strong></li>
</ul>
<div class="clear"></div>
</div>
</li>
<li id="description-second" class="clearfix" style="padding-top: 20px">
<p style="text-align: justify;"><a href="https://captain-whu.github.io/SCD/" target="_blank" class="hovering"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">SECOND</strong></a> is a large-scale aerial image dataset for semantic change detection (SCD). In order to set up a new benchmark for SCD problems with adequate quantities, sufficient categories and proper annotation methods, we present SECOND, a well-annotated semantic change detection dataset. To ensure data diversity, we firstly collect 4662 pairs of aerial images from several platforms and sensors. These pairs of images are distributed over cities such as Hangzhou, Chengdu, and Shanghai. Each image has size 512 x 512 and is annotated at the pixel level. The annotation of SECOND is carried out by an expert group of earth vision applications, which guarantees high label accuracy. For the change category in the SECOND dataset, we focus on 6 main land-cover classes, i.e., non-vegetated ground surface, tree, low vegetation, water, buildings and playgrounds, that are frequently involved in natural and man-made geographical changes. It is worth noticing that, in the new dataset, non-vegetated ground surface (n.v.g. surface for short) mainly corresponds to impervious surface and bare land. In summary, these 6 selected land-cover categories result in 30 common change categories (including non-change). Through the random selection of image pairs, the SECOND reflects real distributions of land-cover categories when changes occur. </p>
<div class="clearfix" align="center">
<a href="https://captain-whu.github.io/SCD/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-images/SECOND.png" alt="" class="scale-with-grid" /></a>
</div>
<div style="padding-top: 30px">
<h5>Data Download</h5>
<ul style="list-style-type: disc;">
<li>
<p>
<a href="https://captain-whu.github.io/SCD/" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">Download SECOND</strong></a>
</p>
</li>
</ul>
</div>
<div style="padding-top: 10px">
<h5>Citation</h5>
<p style="margin-bottom: 10px">If you make use of SECOND, please cite our following work:</p>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@Misc{Yang2020SECOND, <br>
title={Semantic Change Detection with Asymmetric Siamese Networks}, <br>
author={Kunping Yang and Gui-Song Xia and Zicheng Liu and Bo Du and Wen Yang and Marcello Pelillo and Liangpei Zhang}, <br>
year={2020}, <br>
eprint={arXiv:2010.05687} <br>
}
</p>
</div>
</div>
<div style="padding-top: 10px">
<h5>Contact</h5>
<p style="font-size: 15px; line-height: 21px; margin-bottom: 10px;">If you have any problems or feedback in using SECOND, please contact</p>
<ul style="list-style-type: disc;">
<li><strong>Kunping Yang</strong> at: <strong>[email protected]</strong></li>
<li><strong>Gui-Song Xia</strong> at: <strong>[email protected]</strong></li>
</ul>
<div class="clear"></div>
</div>
</li>
<li id="description-uavid" class="clearfix" style="padding-top: 20px">
<p style="text-align: justify;"><a href="https://www.uavid.nl/" target="_blank" class="hovering"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">UAVid</strong></a> dataset is a UAV video dataset for the semantic segmentation task focusing on urban scenes. As a new high-resolution UAV semantic segmentation dataset, the UAVid dataset brings new challenges, including large scale variation, moving object recognition and temporal consistency preservation. Our UAV dataset consists of 42 video sequences capturing 4K high-resolution images in slanted views. In total, 300 images have been densely labeled for the semantic labeling task. There are 8 semantic categories in UAVid, including building, road, static car, tree, low vegetation, human, moving car, and background clutter. </p>
<div class="clearfix" align="center">
<a href="https://www.uavid.nl/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-images/UAVid2.png" alt="" class="scale-with-grid" /></a>
</div>
<div style="padding-top: 30px">
<h5>Data Download</h5>
<ul style="list-style-type: disc;">
<li>
<p>
<a href="https://www.uavid.nl/" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">Download UAVid</strong></a>
</p>
</li>
</ul>
</div>
<div style="padding-top: 10px">
<h5>Citation</h5>
<p style="margin-bottom: 10px">If you make use of UAVid, please cite our following works:</p>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@Article{LYU2020108, <br>
title={UAVid: A semantic segmentation dataset for UAV imagery}, <br>
author={Ye Lyu and George Vosselman and Gui-Song Xia and Alper Yilmaz and Michael Ying Yang}, <br>
journal={ISPRS Journal of Photogrammetry and Remote Sensing}, <br>
year={2020}, <br>
volume={165}, <br>
pages={108-119} <br>
}
</p>
</div>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@misc{1810.10438, <br>
Title={The UAVid Dataset for Video Semantic Segmentation}, <br>
Author={Ye Lyu and George Vosselman and Guisong Xia and Alper Yilmaz and Michael Ying Yang}, <br>
Year={2018}, <br>
Eprint={arXiv:1810.10438}, <br>
}
</p>
</div>
</div>
<div style="padding-top: 10px">
<h5>Contact</h5>
<p style="font-size: 15px; line-height: 21px; margin-bottom: 10px;">If you have any problems or feedback in using UAVid, please contact</p>
<ul style="list-style-type: disc;">
<li><strong>Ye Lyu</strong> at: <strong>[email protected]</strong></li>
<li><strong>Michael Ying Yang</strong> at: <strong>[email protected]</strong></li>
<li><strong>Gui-Song Xia</strong> at: <strong>[email protected]</strong></li>
</ul>
<div class="clear"></div>
</div>
</li>
<li id="description-isaid" class="clearfix" style="padding-top: 20px">
<p style="text-align: justify;"><a href="https://captain-whu.github.io/iSAID/" target="_blank" class="hovering"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">iSAID</strong></a> is the first benchmark dataset for instance segmentation in aerial images. This large-scale and densely annotated dataset is built on the basis of <a href="https://captain-whu.github.io/DOTA/dataset.html" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">DOTA-v1.0</strong></a> dataset. It contains 655,451 object instances for 15 categories across 2,806 high-resolution images. The distinctive characteristics of iSAID are the following: (a) large number of images with high spatial resolution, (b) fifteen important and commonly occurring categories, (c) large number of instances per category, (d) large count of labelled instances per image, which might help in learning contextual information, (e) huge object scale variation, containing small, medium and large objects, often within the same image, (f) Imbalanced and uneven distribution of objects with varying orientation within images, depicting real-life aerial conditions, (g) several small size objects, with ambiguous appearance, can only be resolved with contextual reasoning, (h) precise instance-level annotations carried out by professional annotators, cross-checked and validated by expert annotators complying with well-defined guidelines. </p>
<div class="clearfix" align="center">
<a href="https://captain-whu.github.io/iSAID/" target="_blank" data-text="» Visit Project" class="hovering"><img src="images/dataset-images/iSAID.png" alt="" class="scale-with-grid" /></a>
</div>
<div style="padding-top: 30px">
<h5>Data Download</h5>
<ul style="list-style-type: disc;">
<li>
<p>
<a href="https://captain-whu.github.io/iSAID/" target="_blank" class="borderhover"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;">Download iSAID</strong></a>
</p>
</li>
</ul>
</div>
<div style="padding-top: 10px">
<h5>Citation</h5>
<p style="margin-bottom: 10px">If you make use of iSAID, please cite our following works:</p>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@InProceedings{waqas2019isaid, <br>
title={iSAID: A Large-scale Dataset for Instance Segmentation in Aerial Images}, <br>
author={Waqas Zamir, Syed and Arora, Aditya and Gupta, Akshita and Khan, Salman and Sun, Guolei and Shahbaz Khan, Fahad and Zhu, Fan and Shao, Ling and Xia, Gui-Song and Bai, Xiang}, <br>
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops}, <br>
pages={28--37}, <br>
year={2019} <br>
}
</p>
</div>
<div style="background-color:#F5F5F5;">
<p style="margin-left: 10px;font-size: 13px">
@InProceedings{Xia_2018_CVPR, <br>
title={DOTA: A Large-Scale Dataset for Object Detection in Aerial Images}, <br>
author={Gui-Song Xia and Xiang Bai and Jian Ding and Zhen Zhu and Serge Belongie and Jiebo Luo and Mihai Datcu and Marcello Pelillo and Liangpei Zhang}, <br>
booktitle={The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, <br>
month={June}, <br>
year={2018}
}
</p>
</div>
</div>
<div style="padding-top: 10px">
<h5>Contact</h5>
<p style="font-size: 15px; line-height: 21px; margin-bottom: 10px;">If you have any problems or feedback in using iSAID, please contact</p>
<ul style="list-style-type: disc;">
<li><strong>Syed Waqas Zamir</strong> at: <strong>[email protected]</strong></li>
<li><strong>Aditya Arora</strong> at: <strong>[email protected]</strong></li>
<li><strong>Akshita Gupta</strong> at: <strong>[email protected]</strong></li>
<li><strong>Jian Ding</strong> at: <strong>[email protected]</strong></li>
<li><strong>Gui-Song Xia</strong> at: <strong>[email protected]</strong></li>
</ul>
<div class="clear"></div>
</div>
</li>
<li id="support" class="clearfix" style="padding-top: 30px">
<div class="sixteen columns" style="padding-top: 1px">
<p style="text-indent:2em"><a href="http://www.captain-whu.com/xia_En.html" target="_blank"><strong style="font-style: italic;color: rgb(255,0,78);text-decoration: underline;font-size:21px">CAPTAIN</strong></a></p>
<p style="text-indent:2em;">School of Computer Science & State Key Lab. LIESMARS, Wuhan University</p>
<p style="text-indent:2em;">Luojia Hill, Bayi Road, Wuhan, 430079, China.</p>
<p style="text-indent:2em;">[email protected]</p>
<p style="text-indent:2em;">027-68772503</p>
<ul>
<div class="map"><div id="abcd">
<a href="https://goo.gl/maps/GmtHbbVCatWsN16k7" target="_blank" data-text="» Visit Location" class="hovering"><img src="images/location.png" alt="" class="scale-with-grid" style="width: 96.5%;height: 96.5%;" /></a>
</div></div>
<!-- <div class="map"><div id="abcd"></div></div>
<style>
#container > div#abcd {display: block !important;}
#abcd {width:99%; height:500px;overflow: hidden;margin:0;}
#l-map{height:100%;width:78%;float:left;border-right:2px solid #bcbcbc;}
#r-result{height:100%;width:20%;float:left;}
.mywindow{ height:auto; width:auto; font-size:12px; line-height:22px;}
.mylocationcontainer{width:100%; height:100%; margin:0 auto;}
.mapimg{width:100%;height:100%;}
.BMap_cpyCtrl span,.anchorBL{display:none!important;}
</style>
<script type="text/javascript" src="https://api.map.baidu.com/api?v=1.5&ak=B3f7707c25da5b29a6ff69618788a296"></script>
<script type="text/javascript">
var map = new BMap.Map("abcd");
var point = new BMap.Point(114.363862,30.544807);
//map.enableScrollWheelZoom(true);
map.enableScrollWheelZoom();
map.enableContinuousZoom();
// map.centerAndZoom(point, 15);
map.addControl(new BMap.NavigationControl());
var marker = new BMap.Marker(point);
/*map.addOverlay(marker); */
marker.setAnimation(BMAP_ANIMATION_BOUNCE);var sContent ='<div class="mywindow">CAPTAIN<br>Address: No.129 Luoyu Road, Hongshan District, Wuhan, 430079, China.<br> Office: The 3rd floor of #2 Teaching Building, Information Department of Wuhan University<br>Telephone: 027-68779908<br>Email: [email protected]</div>'; var infoWindow = new BMap.InfoWindow(sContent);
map.centerAndZoom(new BMap.Point(114.3561,30.5485), 18);
map.addOverlay(marker);
marker.addEventListener("click", function(){
this.openInfoWindow(infoWindow);s
document.getElementById("Coolwpimg").onload = function (){
infoWindow.redraw();
}
});
</script> -->
</ul>
<br/><br/>
</div>
</li>
</ul>
<!-- Quotes -->
<!-- <div class="one_half">
<h6>Blockquote</h6>
<blockquote>Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. </blockquote>
</div>
<div class="one_half lastcolumn">
<h6>List</h6>
<ul class="square">
<li>Flexible Layout</li>
<li>Compatible with Common Browsers, Smartphones and Tablet Pc's</li>
<li>Background Images / Tiles</li>
<li>Column or Full-Width Site Style</li>
</ul>
</div><div class="clear"></div> -->
</div>
<!-- Space Adjuster
================================================== -->
<!-- <div class="sixteen columns bottomadjust"></div> -->
</div><!-- container -->
<!-- Footer
================================================== -->
<!-- Change to class="container footerwrap full" for a full-width footer -->
<!-- <div class="container footerwrap" style="padding: 0px 0px">
<div class="footer">
<div class="sixteen columns">
© 2021 Computational and Photogrammetric Vision Team (CAPTAIN)
</div>
</div>
</div> -->
<!-- Sub-Footer
================================================== -->
<!-- Change to class="container subfooterwrap full" for a full-width subfooter -->
<div class="container subfooterwrap" style="height:80px;padding: 0 0 0 0">
<div class="subfooter" style="width: 1080px">
<div class="eight columns" style="padding-top: 10px;">
<a href="http://www.captain-whu.com/xia_En.html" target="_blank">
Luojia Hill, Bayi Road, Hongshan district, Wuhan, Hubei province, China <br>
© CAPTAIN, School of Computer Science, Wuhan University
</a>
</div>
<div class="ten columns" style="margin-left:20px; margin-right: 0px; padding-right: 0px;">
<ul class="socialicons">
<li>
<a href="https://clustrmaps.com/site/1bhp3" target="_blank" title="Visit tracker">
<img style="width:120px;height:80px" src="https://www.clustrmaps.com/map_v2.png?d=SjeXuchLyr6aPWeBqJfbjkehR3HQySHjYOlwEl8UpiQ&amp;cl=ffffff" alt="ClustrMaps visitor map">
</a>
</li>
</ul>
<!-- <div class="socialtext"></div> -->
</div>
</div>
</div>
<!-- End Document
================================================== -->
<!-- Scripts
================================================== -->
<script src="js/jquery-1.7.min.js" type="text/javascript"></script>
<script src="js/jquery.easing.1.3.js" type="text/javascript"></script>
<script src="js/jquery.animate-colors-min.js" type="text/javascript"></script>
<!--/***********************************************
* Smooth Navigational Menu- (c) Dynamic Drive DHTML code library (www.dynamicdrive.com)
* This notice MUST stay intact for legal use
* Visit Dynamic Drive at http://www.dynamicdrive.com/ for full source code
***********************************************/-->
<script src="js/ddsmoothmenu.js" type="text/javascript"></script>
<script src="js/jquery.cssAnimate.mini.js" type="text/javascript"></script>
<script src="js/jquery.fitvids.js" type="text/javascript"></script>
<script src="js/jquery.flexslider-min.js" type="text/javascript"></script>
<script src="js/jquery.prettyPhoto.js" type="text/javascript"></script>
<script src="js/screen.js" type="text/javascript"></script>
</body>
</html>