@article{dodangeh2021deep,
  author = {Dodangeh, Parisa and Ebadi, Hamid and Kiani, Abbas},
  title = {Deep Learning Network for Flood Extent Mapping based on the integration of Sentinel 2 and MODIS satellite imagery},
  journal = {Journal of Environmental Studies},
  volume = {47},
  number = {2},
  pages = {181-204},
  year = {2021},
  publisher = {University of Tehran},
  issn = {1025-8620},
  eissn = {2345-6922},
  doi = {10.22059/jes.2021.325289.1008187},
  abstract = {Introduction: Floods are natural hazards that cause many deaths each year, and their frequency is increasing worldwide as a consequence of climate change. Damage assessment for natural disasters such as floods therefore provides important information to support decision-making and policy development in natural hazard management and climate change planning. In recent years, various methods for classifying remote sensing images have been developed, but they still face challenges in differentiating between land-use types. Another challenge in flood crisis management is the lack of access to satellite imagery with high temporal resolution that also preserves spatial resolution, a limitation that becomes more pronounced under the cloud cover that typically accompanies floods. The purpose of this study is to identify flooded areas in Khuzestan province following the flood of 1398 (2019), based on the fusion of Sentinel-2 and MODIS optical imagery to produce a time series with relatively good spatial and temporal resolution. For classification and map production, a patch-based hierarchical convolutional neural network is designed that addresses the difficulty of extracting deep features from the relatively weak structure of images with resolutions of 10 meters and coarser. In addition, the effect of different neighborhood sizes on deep feature extraction is investigated for all images. Finally, the flooded area of urban land cover and of various agricultural land uses is estimated consecutively over the flood period. Material and methods: Two series of satellite images are used in this research: Sentinel-2 MSI Level-1C images with a spatial resolution of 10 meters and the MODIS daily surface reflectance product (MOD09GA) with a spatial resolution of 500 meters. The overall workflow can be summarized in seven phases. In the first phase, the data are pre-processed. In the second phase, the image fusion algorithm is applied to predict daily surface reflectance; if the error and accuracy of the predicted images are acceptable, the time series of the flood period is obtained. In the third phase, ground-truth maps are prepared by the researcher through image interpretation. In the fourth phase, training samples are derived from these data for the different classifiers, including deep learning and machine learning approaches, and the proposed network is implemented with different input dimensions. It should be noted that the number of training and validation samples for the deep learning networks is kept very limited, at less than half a percent of the image pixels, in order to automate the process and reduce user dependence. In the fifth phase, the maps needed for damage assessment in agricultural and vegetated regions are produced with the best-performing approach from the previous phase, and in the sixth phase, accuracy is assessed with the confusion matrix and related criteria.
In the seventh and final phase, the area of each flood-affected land use is estimated. Discussion: The present study addresses one of the most important issues in crisis management in the country, namely the assessment of damage caused by sudden flood events. Accordingly, one objective of this research is to present a method that is faster than existing approaches while also increasing the accuracy of the final maps, which remains a challenging task. First, the ESTARFM fusion algorithm was used to prepare a time series of optical data with suitable spatial and temporal resolution. The evaluations performed for the two fused images show that the algorithm is efficient and accurate in areas with heterogeneous land cover. Because of the change in environmental conditions between the acquisition dates, the largest errors occur in the water-sensitive bands, but the errors in every band are small, confirming the effectiveness of the algorithm. In addition, since two images of the time series are predicted, the generalizability of the algorithm is also examined and demonstrated. Regarding the classification algorithms used to prepare the damage map, the proposed neural network is markedly more accurate than the other approaches. A class-by-class inspection shows that the proposed approach identifies built-up areas far better than the other algorithms while maintaining good accuracy for the remaining classes, especially water. According to the analyses, flooding in the study area peaked in the third week of April and declined thereafter; damage was therefore estimated for April 14 and April 21. The assessments show that between April 14 and April 21 flooding decreased in built-up areas and in rainfed and fallow lands, and increased in wetlands and aquatic cultivation areas. Conclusion: In this research, the ESTARFM image fusion algorithm, known to be suitable for fusing images over heterogeneous regions, was applied to the April 8 and April 14 images, and the results were evaluated with scatter plots and least-squares error. The results demonstrate the efficiency of the method in integrating relatively high-resolution Sentinel-2 images with low-resolution MODIS images for flood management. For mapping flooded areas, the weak structure of images with resolutions above 10 meters makes it difficult to extract informative deep features. In the present study, a patch-based convolutional neural network with a minimal number of layers and hyper-parameters is designed, which allows training from scratch with very few training samples and without overfitting on images acquired under different environmental conditions. To find the optimal configuration, different input dimensions were tested on all images to compare the effect of different neighborhood sizes. Patch sizes from 3 × 3 to 11 × 11 were tested; 5 × 5 and 7 × 7 patches performed best on the pre-flood image, and 9 × 9 and 11 × 11 patches on the post-flood images. The results were compared with object-based and pixel-based SVM and with LCNN and DCNN networks using 3 × 3 and 5 × 5 inputs, as in the reference studies, and showed a significant improvement in accuracy.
Runtime was also compared across all approaches: the proposed network with 3 × 3 and 5 × 5 patches was the fastest, while the DCNN with 5 × 5 inputs was the slowest. Given the importance of time in crisis management and the need to produce maps rapidly, the proposed approach provides an appropriate response. When time and accuracy are weighed together, the designed network with 9 × 9 inputs is recommended, since it offers both high accuracy and a favorable runtime.},
  keywords = {Deep learning, Convolutional neural network, Image fusion, Flood map, Flood crisis management},
  title_fa = {Extraction of flood-affected areas based on the fusion of Sentinel-2 and MODIS satellite images using a deep learning network},
  abstract_fa = {Flood events in semi-urban areas are always accompanied by extensive damage to various infrastructures, so providing approaches that can accurately assess flooded areas in the shortest possible time is a necessity of crisis management. Image classification methods developed for this purpose still face challenges in separating land uses, and a further challenge in flood studies is the lack of access to satellite imagery with high temporal resolution while preserving spatial accuracy. The aim of this research is to estimate the extent of inundation of different land uses following the 1398 (2019) flood in Khuzestan province, based on image fusion. To map the flood-affected areas, a patch-based convolutional neural network is designed that overcomes the challenge of extracting deep features from the relatively weak structure of the images used. Finally, the flood map of the different land uses is estimated consecutively over the flood period. With the proposed method, the pre-flood land-use maps achieved an accuracy of 73, and the post-flood maps, in chronological order, achieved accuracies of 75, 77.5, and 79. The results indicate the good performance of the proposed approach in meeting the speed and accuracy challenges, and its implementation on different flood images demonstrates the generality of the process.},
  keywords_fa = {Deep learning, Convolutional neural network, Image fusion, Flood map, Flood crisis management},
  url = {https://jes.ut.ac.ir/article_84190.html},
  eprint = {https://jes.ut.ac.ir/article_84190_86f61caa59f40d125191ab106b52b2c0.pdf}
}
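
The fusion phase described in the abstract uses ESTARFM to predict daily Sentinel-2-like reflectance from MODIS. The Python sketch below illustrates only the core spatio-temporal fusion idea (a STARFM-like transfer of the coarse sensor's temporal change to the fine image, weighted by spectrally similar neighbours); it is not the authors' ESTARFM implementation, and the function name, window size, number of similar pixels, and the assumption that the MODIS band is already resampled to the Sentinel-2 grid are all illustrative.

# Minimal, illustrative spatio-temporal fusion sketch (assumed names/parameters).
import numpy as np

def fuse_simple(fine_t0, coarse_t0, coarse_tp, window=7, n_similar=20):
    """Predict a fine-resolution band at date tp.

    fine_t0   : 2-D array, fine (e.g. Sentinel-2) band at base date t0
    coarse_t0 : 2-D array, coarse (e.g. MODIS) band resampled to the fine grid, date t0
    coarse_tp : 2-D array, coarse band resampled to the fine grid, prediction date tp
    """
    h, w = fine_t0.shape
    half = window // 2
    pred = np.empty((h, w), dtype=np.float64)
    # Pad so the moving window is defined at the image borders.
    f0 = np.pad(fine_t0.astype(np.float64), half, mode="reflect")
    c0 = np.pad(coarse_t0.astype(np.float64), half, mode="reflect")
    cp = np.pad(coarse_tp.astype(np.float64), half, mode="reflect")

    for i in range(h):
        for j in range(w):
            wf0 = f0[i:i + window, j:j + window].ravel()
            wc0 = c0[i:i + window, j:j + window].ravel()
            wcp = cp[i:i + window, j:j + window].ravel()
            centre = f0[i + half, j + half]
            # Keep only the spectrally most similar neighbours of the centre pixel.
            order = np.argsort(np.abs(wf0 - centre))[:n_similar]
            # Weight similar pixels by inverse spectral distance.
            dist = np.abs(wf0[order] - centre) + 1e-6
            wgt = (1.0 / dist) / np.sum(1.0 / dist)
            # Transfer the coarse-sensor temporal change to the fine pixel.
            pred[i, j] = centre + np.sum(wgt * (wcp[order] - wc0[order]))
    return pred

if __name__ == "__main__":
    # Tiny synthetic example so the sketch runs quickly.
    rng = np.random.default_rng(0)
    f0 = rng.random((80, 80))
    c0 = f0 + 0.05 * rng.random((80, 80))   # coarse band, base date
    cp = c0 + 0.10                          # coarse band, prediction date
    print(fuse_simple(f0, c0, cp).mean())

The published ESTARFM additionally uses two fine/coarse image pairs and per-pixel conversion coefficients estimated by regression between the two sensors, which this toy version omits.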
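
The patch-based classification phase pairs small image neighbourhoods with a shallow, train-from-scratch CNN. The sketch below shows that general pattern in Keras, not the authors' network: the layer widths, dropout rate, 9 x 9 patch size, four bands, and six classes are assumptions chosen for illustration (the paper itself compares 3 x 3 through 11 x 11 neighbourhoods).

# Illustrative patch-based CNN classifier (assumed architecture and sizes).
import numpy as np
import tensorflow as tf

def extract_patches(image, rows, cols, patch=9):
    """Cut patch x patch neighbourhoods (all bands) centred on the (row, col) pairs."""
    half = patch // 2
    padded = np.pad(image, ((half, half), (half, half), (0, 0)), mode="reflect")
    return np.stack([padded[r:r + patch, c:c + patch, :] for r, c in zip(rows, cols)])

def build_patch_cnn(patch=9, n_bands=4, n_classes=6):
    """Shallow CNN with few layers/hyper-parameters, trainable from scratch."""
    return tf.keras.Sequential([
        tf.keras.Input(shape=(patch, patch, n_bands)),
        tf.keras.layers.Conv2D(32, 3, padding="same", activation="relu"),
        tf.keras.layers.Conv2D(32, 3, padding="same", activation="relu"),
        tf.keras.layers.MaxPooling2D(2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dropout(0.3),
        tf.keras.layers.Dense(64, activation="relu"),
        tf.keras.layers.Dense(n_classes, activation="softmax"),
    ])

if __name__ == "__main__":
    # Synthetic stand-in for an image subset and a small set of labelled pixels.
    image = np.random.rand(256, 256, 4).astype("float32")
    rows = np.random.randint(0, 256, 300)
    cols = np.random.randint(0, 256, 300)
    labels = np.random.randint(0, 6, 300)
    x = extract_patches(image, rows, cols, patch=9)
    model = build_patch_cnn(patch=9, n_bands=4, n_classes=6)
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
                  metrics=["accuracy"])
    model.fit(x, labels, epochs=5, batch_size=32, validation_split=0.2, verbose=0)

Changing the patch argument reproduces the kind of neighbourhood-size comparison the abstract reports (3 x 3 up to 11 x 11), with the rest of the network unchanged.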
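
The last two phases, accuracy assessment with a confusion matrix and estimation of the flooded area of each land use, follow standard practice and can be sketched as below; the class names, the 10 m pixel size, and the random arrays are placeholders rather than values from the paper.

# Illustrative accuracy assessment and per-class flooded-area estimation.
import numpy as np

def confusion_matrix(truth, pred, n_classes):
    cm = np.zeros((n_classes, n_classes), dtype=np.int64)
    for t, p in zip(truth.ravel(), pred.ravel()):
        cm[t, p] += 1
    return cm

def overall_accuracy(cm):
    return np.trace(cm) / cm.sum()

def kappa(cm):
    total = cm.sum()
    po = np.trace(cm) / total
    pe = np.sum(cm.sum(axis=0) * cm.sum(axis=1)) / total**2
    return (po - pe) / (1 - pe)

def flooded_area_ha(class_map, flood_mask, class_names, pixel_size_m=10.0):
    """Hectares of each class inside the flood mask (10 m pixel = 100 m^2)."""
    px_ha = (pixel_size_m ** 2) / 10000.0
    return {name: int(np.sum((class_map == k) & flood_mask)) * px_ha
            for k, name in enumerate(class_names)}

if __name__ == "__main__":
    # Placeholder maps; in practice these come from the classifier and ground truth.
    classes = ["built-up", "rainfed", "fallow", "wetland", "aquatic crops", "water"]
    class_map = np.random.randint(0, len(classes), (500, 500))
    truth = np.random.randint(0, len(classes), (500, 500))
    flood_mask = np.random.rand(500, 500) > 0.7
    cm = confusion_matrix(truth, class_map, len(classes))
    print(overall_accuracy(cm), kappa(cm))
    print(flooded_area_ha(class_map, flood_mask, classes))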