<script src="https://bibbase.org/show?bib=https%3A%2F%2Fblogs.cornell.edu%2Fcair%2Ffiles%2F2021%2F12%2Flab_pub_20211230.bib&commas=true&jsonp=1"></script>
<?php
$contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fblogs.cornell.edu%2Fcair%2Ffiles%2F2021%2F12%2Flab_pub_20211230.bib&commas=true");
print_r($contents);
?>
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fblogs.cornell.edu%2Fcair%2Ffiles%2F2021%2F12%2Flab_pub_20211230.bib&commas=true"></iframe>
For more details, see the documentation.
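If you prefer to work with the raw BibTeX rather than the rendered BibBase view, the file can be fetched with a few lines of Python (a minimal sketch using only the standard library; it assumes the .bib URL behind the snippets above remains publicly accessible):

import urllib.request

# Raw BibTeX file behind the BibBase views above
BIB_URL = "https://blogs.cornell.edu/cair/files/2021/12/lab_pub_20211230.bib"

with urllib.request.urlopen(BIB_URL) as response:
    bibtex = response.read().decode("utf-8")

# Keep a local copy for offline parsing or custom rendering
with open("lab_pub_20211230.bib", "w", encoding="utf-8") as f:
    f.write(bibtex)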
@inproceedings{RN37, author = {Liu, Ertai and Gold, Kaitlin M. and Combs, David and Cadle-Davidson, Lance and Jiang, Yu}, title = {Deep learning-based autonomous downy mildew detection and severity estimation in vineyards}, booktitle = {2021 ASABE Annual International Virtual Meeting}, series = {ASABE Paper No. 2100486}, publisher = {ASABE}, pages = {1}, abstract = {Downy mildew (DM) poses a significant challenge for many high value crops such as grapes. DM can appear at nearly all stages of grapevine growth on clusters, canes, and leaves, and can result in total crop loss. Traditional DM assessment methods rely on in-field human observations or laboratory analysis of plant characteristics, which are laborious and costly for periodical tracking of the disease. The overall goal of this study was to develop an effective deep learning-based approach for the quantification of DM infection in vineyards. A custom ATV-based imaging system was used to collect georeferenced stereo images of grapevines in a vineyard, forming a dataset of 2072 image pairs. Human experts manually assessed the grapevines' DM infection severity on the same day of data collection. A total of 58 images were selected and manually annotated to train and validate a segmentation model for DM detection in stereo image pairs. A Hierarchical Multi-Scale Attention Semantic Segmentation (HMASS) network was selected as the segmentation model. Based on geolocation, the stereo pairs were segregated into each vine block. The trained DM detector was used to identify infection regions in all stereo pairs in a vine block, and all detections were projected into 3D space to remove duplicate detections. The ratio between the area of leaves calculated by image color filtering and the area of infections given by the segmentation model was used to quantify the infection severity in each block. Experimental results showed that the trained HMASS detector could accurately identify DM infected regions. Infection severity rates calculated using the developed approach were highly correlated (r=0.96) with the human field assessment. The infection severity ranking of all 6 DM treatments in the vineyard provided by the developed approach was also identical to the human assessments. These results suggest that the developed approach can be used for rapid and accurate DM detection, which lays the foundation for the development of automated DM quantification and management systems.}, keywords = {computer vision, plant disease, downy mildew, machine learning, proximal sensing, vineyard management}, DOI = {https://doi.org/10.13031/aim.202100486}, url_Link = {https://elibrary.asabe.org/abstract.asp?aid=52414&t=5}, year = {2021}, type = {Conference Proceedings} }
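The block-level severity measure described in the abstract above reduces to a ratio of two segmented areas. A minimal sketch of that final step (not the authors' code; the mask names, shapes, and values are hypothetical stand-ins for the color-filtered leaf mask and the HMASS infection mask):

import numpy as np

# Hypothetical binary masks for one vine block
leaf_mask = np.zeros((512, 512), dtype=bool)       # leaves via color filtering
infection_mask = np.zeros((512, 512), dtype=bool)  # DM regions via segmentation
leaf_mask[100:400, 100:400] = True
infection_mask[150:200, 150:200] = True

# Severity as the ratio of infected area to leaf area (guard against empty masks)
severity = infection_mask.sum() / max(leaf_mask.sum(), 1)
print(f"Estimated DM severity: {severity:.3f}")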
@inproceedings{RN36, author = {Qiu, Tian and Underhill, Anna and Sapkota, Surya Datta and Cadle-Davidson, Lance and Jiang, Yu}, title = {Deep learning-based saliency maps for the quantification of grape powdery mildew at the microscopic level}, booktitle = {2021 ASABE Annual International Virtual Meeting}, series = {ASABE Paper No. 2100496}, publisher = {ASABE}, pages = {1}, abstract = {Powdery mildew (PM) is one of the most widespread plant diseases and can damage a wide range of crops, causing significant economic losses annually. This urges the breeding of PM resistant crop cultivars and the development of management practices. A major bottleneck is the accuracy and efficiency of image analysis at the microscopic level, which is essential to understand PM infection and accelerate crop breeding and management practice development. The overall goal of this study was to develop a deep learning-based saliency map approach that can quantify PM infection in images of high spatial resolution. A subset of the 2690 collected images of 1-cm leaf disks was randomly selected to extract a total of 21,162 image patches of 224x224 pixels. A custom thresholding method was used to mask out irrelevant background information from a leaf disk image. The remaining image area was cropped into image patches of 224x224 pixels to be classified by pretrained CNN classifiers. For the patches predicted as infected, patch saliency maps were generated using several saliency methods. All patch saliency maps were re-assembled to construct a leaf-level infection map for the quantification of PM infection in leaf disk images. Experimental results showed that with a well-trained CNN classifier (validation accuracy of 95.66%), our approach achieved remarkable accuracy in the localization and quantification of PM hyphae by using only patch-level class annotations, suggesting great potential for reducing annotation cost in deep learning-based quantification. Compared with manual assessment, our approach also improved the processing speed by 20 to 60 times. Therefore, the developed approach can be an effective and efficient analysis tool for PM disease research in the future.}, keywords = {disease quantification, explainable AI, high-throughput phenotyping, grape powdery mildew}, DOI = {https://doi.org/10.13031/aim.202100496}, url_Link = {https://elibrary.asabe.org/abstract.asp?aid=52418&t=5}, year = {2021}, type = {Conference Proceedings} }
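Saliency methods like those in the paper above attribute a patch classifier's prediction back to individual pixels. A minimal gradient-saliency sketch in PyTorch (a generic illustration only, not the paper's implementation; the untrained ResNet-18 stand-in and random patch are assumptions):

import torch
import torchvision

# Stand-in patch classifier; the paper trains its own CNN on leaf-disk patches
model = torchvision.models.resnet18(weights=None)
model.eval()

# One 224x224 RGB patch with gradient tracking enabled
patch = torch.rand(1, 3, 224, 224, requires_grad=True)

scores = model(patch)
scores[0, scores.argmax()].backward()  # gradient of the top class score

# Per-pixel saliency: maximum absolute gradient across color channels
saliency = patch.grad.abs().max(dim=1).values[0]
print(saliency.shape)  # torch.Size([224, 224])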
@article{RN39, author = {Adke, Shrinidhi and Haro von Mogel, Karl and Jiang, Yu and Li, Changying}, title = {Instance segmentation to estimate consumption of corn ears by wild animals for GMO preference tests}, journal = {Frontiers in Artificial Intelligence}, volume = {3}, number = {119}, abstract = {The Genetically Modified (GMO) Corn Experiment was performed to test the hypothesis that wild animals prefer Non-GMO corn and avoid eating GMO corn, which resulted in the collection of complex image data of consumed corn ears. This study develops a deep learning-based image processing pipeline that aims to estimate the consumption of corn by identifying corn and its bare cob from these images, which will aid in testing the hypothesis in the GMO Corn Experiment. The pipeline uses a mask regional convolutional neural network (Mask R-CNN) for instance segmentation. Based on image data annotation, two approaches for segmentation were discussed: identifying whole corn ears and bare cob parts with and without corn kernels. The Mask R-CNN model was trained for both approaches and segmentation results were compared. Of the two, the latter approach, i.e., without the kernel, was chosen to estimate the corn consumption because of its superior segmentation performance and estimation accuracy. Ablation experiments were performed with the latter approach to obtain the best model with the available data. The estimation results of these models were compared with manually labeled test data, with R<sup>2</sup> = 0.99, which showed that use of the Mask R-CNN model to estimate corn consumption provides highly accurate results, thus allowing it to be used further on all collected data and helping to test the hypothesis of the GMO Corn Experiment. These approaches may also be applied to other plant phenotyping tasks (e.g., yield estimation and plant stress quantification) that require instance segmentation.}, keywords = {deep learning, Mask R-CNN, instance segmentation, GMO, image processing}, ISSN = {2624-8212}, DOI = {https://doi.org/10.3389/frai.2020.593622}, url = {https://www.frontiersin.org/article/10.3389/frai.2020.593622}, year = {2021}, type = {Journal Article} }
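Mask R-CNN instance segmentation of the kind used above is available off the shelf in torchvision. A minimal inference sketch (illustrative only; the COCO-pretrained weights and 0.5 thresholds are assumptions, and the paper fine-tunes on its own corn-ear and bare-cob annotations):

import torch
import torchvision

# Pretrained Mask R-CNN (requires torchvision >= 0.13 for the weights API)
model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights="DEFAULT")
model.eval()

image = torch.rand(3, 480, 640)  # placeholder RGB tensor scaled to [0, 1]
with torch.no_grad():
    pred = model([image])[0]

# Keep confident instances; each mask is a soft [1, H, W] probability map
keep = pred["scores"] > 0.5
masks = pred["masks"][keep] > 0.5  # binarize for area measurements
print(f"{int(keep.sum())} instances detected")

Consumption could then be estimated from the pixel areas of the segmented ear and bare-cob masks, as the paper does with its trained model.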
@article{RN38, author = {Carlson, Craig H. and Stack, George M. and Jiang, Yu and Taşkıran, Bircan and Cala, Ali R. and Toth, Jacob A. and Philippe, Glenn and Rose, Jocelyn K. C. and Smart, Christine D. and Smart, Lawrence B.}, title = {Morphometric relationships and their contribution to biomass and cannabinoid yield in hybrids of hemp (Cannabis sativa)}, journal = {Journal of Experimental Botany}, volume = {72}, number = {22}, pages = {7694-7709}, abstract = {The breeding of hybrid cultivars of hemp (Cannabis sativa L.) is not well described, especially the segregation and inheritance of traits that are important for yield. A total of 23 families were produced from genetically diverse parents to investigate the inheritance of morphological traits and their association with biomass accumulation and cannabinoid yield. In addition, a novel classification method for canopy architecture was developed. The strong linear relationship between wet and dry biomass provided an accurate estimate of final dry stripped floral biomass. Of all field and aerial measurements, basal stem diameter was determined to be the single best selection criterion for final dry stripped floral biomass yield. Along with stem diameter, canopy architecture and stem growth predictors described the majority of the explainable variation of biomass yield. Within-family variance for morphological and cannabinoid measurements reflected the heterozygosity of the parents. While selfed populations suffered from inbreeding depression, hybrid development in hemp will require at least one inbred parent to achieve uniform growth and biomass yield. Nevertheless, floral phenology remains a confounding factor in selection because of its underlying influence on biomass production, highlighting the need to understand the genetic basis for flowering time in the breeding of uniform cultivars.}, ISSN = {0022-0957}, DOI = {https://doi.org/10.1093/jxb/erab346}, url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC8643699/pdf/erab346.pdf}, year = {2021}, type = {Journal Article} }
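The wet-to-dry biomass relationship noted above is an ordinary linear fit. A small numpy sketch of that step (the paired values here are invented for illustration, not the study's data):

import numpy as np

# Hypothetical paired plot measurements (kg)
wet = np.array([4.1, 5.3, 6.8, 7.9, 9.2])
dry = np.array([1.0, 1.3, 1.7, 2.0, 2.3])

slope, intercept = np.polyfit(wet, dry, deg=1)
r = np.corrcoef(wet, dry)[0, 1]
print(f"dry ~= {slope:.3f} * wet + {intercept:.3f} (r = {r:.3f})")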
@article{RN34, author = {Jiang, Yu and Li, Changying}, title = {Convolutional neural networks for image-based high-throughput plant phenotyping: A review}, journal = {Plant Phenomics}, volume = {2020}, abstract = {Plant phenotyping has been recognized as a bottleneck for improving the efficiency of breeding programs, understanding plant-environment interactions, and managing agricultural systems. In the past five years, imaging approaches have shown great potential for high-throughput plant phenotyping, resulting in more attention paid to imaging-based plant phenotyping. With this increased amount of image data, it has become urgent to develop robust analytical tools that can extract phenotypic traits accurately and rapidly. The goal of this review is to provide a comprehensive overview of the latest studies using deep convolutional neural networks (CNNs) in plant phenotyping applications. We specifically review the use of various CNN architectures for plant stress evaluation, plant development, and postharvest quality assessment. We systematically organize the studies based on technical developments resulting from image classification, object detection, and image segmentation, thereby identifying state-of-the-art solutions for certain phenotyping applications. Finally, we provide several directions for future research in the use of CNN architectures for plant phenotyping purposes.}, keywords = {yield estimation deep segmentation field classification phenomics system crop}, ISSN = {2643-6515}, DOI = {https://doi.org/10.34133/2020/4152816}, url_Link = {https://downloads.spj.sciencemag.org/plantphenomics/2020/4152816.pdf}, year = {2020}, type = {Journal Article} }
@article{RN35, author = {Jiang, Yu and Li, Changying and Xu, Rui and Sun, Shangpeng and Robertson, Jon S. and Paterson, Andrew H.}, title = {DeepFlower: a deep learning-based approach to characterize flowering patterns of cotton plants in the field}, journal = {Plant Methods}, volume = {16}, number = {1}, abstract = {Background: Flowering is one of the most important processes for flowering plants such as cotton, reflecting the transition from vegetative to reproductive growth, and is of central importance to crop yield and adaptability. Conventionally, categorical scoring systems have been widely used to study flowering patterns, which are laborious and subjective to apply. The goal of this study was to develop a deep learning-based approach to characterize flowering patterns for cotton plants that flower progressively over several weeks, with flowers distributed across much of the plant. Results: A ground mobile system (GPhenoVision) was modified with a multi-view color imaging module to acquire images of a plant from four viewing angles at a time. A total of 116 plants from 23 genotypes were imaged during an approximately 2-month period with an average scanning interval of 2-3 days, yielding a dataset containing 8666 images. A subset (475) of the images was randomly selected and manually annotated to form datasets for training and selecting the best object detection model. With the best model, a deep learning-based approach (DeepFlower) was developed to detect and count individual emerging blooms for a plant on a given date. DeepFlower was used to process all images to obtain bloom counts for individual plants over the flowering period, using the resulting counts to derive flowering curves (and thus flowering characteristics). Regression analyses showed that the DeepFlower method could accurately (R^2 = 0.88 and RMSE = 0.79) detect and count emerging blooms on cotton plants, and statistical analyses showed that imaging-derived flowering characteristics had similar effectiveness to manual assessment for identifying differences among genetic categories or genotypes. Conclusions: The developed approach could thus be an effective and efficient tool to characterize flowering patterns for flowering plants (such as cotton) with complex canopy architecture.}, keywords = {flowering pattern, deep learning, object detection, high-throughput plant phenotyping, image analysis}, DOI = {https://doi.org/10.1186/s13007-020-00698-y}, url = {https://plantmethods.biomedcentral.com/track/pdf/10.1186/s13007-020-00698-y.pdf}, year = {2020}, type = {Journal Article} }
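Deriving a flowering curve from per-date bloom counts, as described above, is essentially a cumulative aggregation over scan dates. A tiny illustration (the dates and counts are hypothetical, not the study's data):

# Hypothetical (scan date, emerging-bloom count) pairs for one plant
detections = [("2019-07-01", 0), ("2019-07-04", 2), ("2019-07-07", 5),
              ("2019-07-10", 3), ("2019-07-13", 1)]

curve = {}
total = 0
for date, count in detections:
    total += count
    curve[date] = total  # cumulative blooms up to each scan date

print(curve)  # the flowering curve from which characteristics are derived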
@article{RN19, author = {Jiang, Yu and Snider, John L. and Li, Changying and Rains, Glen C. and Paterson, Andrew H.}, title = {Ground based hyperspectral imaging to characterize canopy-level photosynthetic activities}, journal = {Remote Sensing}, volume = {12}, number = {2}, pages = {315}, ISSN = {2072-4292}, DOI = {https://doi.org/10.3390/rs12020315}, url = {https://www.mdpi.com/2072-4292/12/2/315}, year = {2020}, type = {Journal Article} }
@article{RN20, author = {Sun, Shangpeng and Li, Changying and Chee, Peng W. and Paterson, Andrew H. and Jiang, Yu and Xu, Rui and Robertson, Jon S. and Adhikari, Jeevan and Shehzad, Tariq}, title = {Three-dimensional photogrammetric mapping of cotton bolls in situ based on point cloud segmentation and clustering}, journal = {ISPRS Journal of Photogrammetry and Remote Sensing}, volume = {160}, pages = {195-207}, abstract = {Three-dimensional high throughput plant phenotyping techniques provide an opportunity to measure plant organ-level traits which can be highly useful to plant breeders. The number and locations of cotton bolls, which are the fruit of cotton plants and an important component of fiber yield, are arguably among the most important phenotypic traits but are complex to quantify manually. Hence, there is a need for effective and efficient cotton boll phenotyping solutions to support breeding research and monitor the crop yield leading to better production management systems. We developed a novel methodology for 3D cotton boll mapping within a plot in situ. Point clouds were reconstructed from multi-view images using the structure from motion algorithm. The method used a region-based classification algorithm that successfully accounted for noise due to sunlight. The developed density-based clustering method could estimate boll counts for this situation, in which bolls were in direct contact with other bolls. By applying the method to point clouds from 30 plots of cotton plants, boll counts, boll volume and position data were derived. The average accuracy of boll counting was up to 90% and the R2 values between fiber yield and boll number, as well as fiber yield and boll volume were 0.87 and 0.66, respectively. The 3D boll spatial distribution could also be analyzed using this method. This method, which was low-cost and provided improved site-specific data on cotton bolls, can also be applied to other plant/fruit mapping analysis after some modification.}, keywords = {clustering, field-based high throughput phenotyping, LiDAR, point cloud, segmentation, spatial distribution}, ISSN = {0924-2716}, DOI = {https://doi.org/10.1016/j.isprsjprs.2019.12.011}, url = {http://www.sciencedirect.com/science/article/pii/S0924271619302990}, year = {2020}, type = {Journal Article} }
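Density-based clustering of boll points, as described above, can be illustrated with scikit-learn's DBSCAN (the paper develops its own density-based method; DBSCAN, the eps/min_samples values, and the synthetic points here are stand-ins):

import numpy as np
from sklearn.cluster import DBSCAN

# Synthetic stand-in for points classified as 'boll' in a plot point cloud:
# three tight clusters of 50 points each
rng = np.random.default_rng(0)
points = np.vstack([rng.normal(c, 0.02, size=(50, 3)) for c in (0.2, 0.5, 0.8)])

labels = DBSCAN(eps=0.05, min_samples=10).fit_predict(points)
n_bolls = len(set(labels)) - (1 if -1 in labels else 0)  # label -1 marks noise
print(f"Estimated boll count: {n_bolls}")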
@article{RN21, author = {Zhang, Mengyun and Jiang, Yu and Li, Changying and Yang, Fuzeng}, title = {Fully convolutional networks for blueberry bruising and calyx segmentation using hyperspectral transmittance imaging}, journal = {Biosystems Engineering}, volume = {192}, pages = {159-175}, abstract = {Early detection of internal bruises in blueberries is a significant challenge for the blueberry industry. The main goal of this study was to accurately detect blueberries' internal bruising after mechanical damage, from hyperspectral transmittance images (HSTIs), using the deep learning-based method of fully convolutional networks (FCNs) for segmentation tasks. To improve detection accuracy, a total of three classes (bruised tissue, unbruised tissue, and calyx end of blueberries) were treated as segmentation targets. A near-infrared hyperspectral imaging system was used to acquire transmittance images of 1200 blueberries, and the images were divided randomly to form training, validation and testing sets. Three categories of input HSTIs were used to evaluate the FCN models using pre-trained weights (transfer learning) and random initialisation. Random forests and linear discriminant analysis were applied to generate 9-channel and 3-channel input images along with full-wavelength multi-channel images. The results indicate that when using the deep learning approach, blueberry bruises and calyx ends can be segmented from the blueberry fruit as early as 30 min after mechanical damage has been inflicted on the blueberries. The new full-wavelength model with random initialisation had the highest accuracy (81.2% over the entire test set), and can be used to research the resistance of blueberry fruit to mechanical damage. The new 3-channel and 9-channel models show potential for application to packing-line detection and online inspection.}, keywords = {image segmentation, deep learning, fully convolutional network, bruise detection, blueberry bruise}, ISSN = {1537-5110}, DOI = {https://doi.org/10.1016/j.biosystemseng.2020.01.018}, url = {http://www.sciencedirect.com/science/article/pii/S1537511020300301}, year = {2020}, type = {Journal Article} }
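A fully convolutional segmentation network of the kind used above can be instantiated from torchvision. A minimal forward-pass sketch (illustrative only; the ResNet-50 backbone, the 3-channel input, and the four-class head, i.e. the paper's three targets plus background, are assumptions, and the paper adapts FCNs to multi-channel hyperspectral inputs):

import torch
import torchvision

# Three target classes (bruised, unbruised, calyx end) plus background
model = torchvision.models.segmentation.fcn_resnet50(weights=None, num_classes=4)
model.eval()

x = torch.rand(1, 3, 256, 256)  # placeholder 3-channel input image
with torch.no_grad():
    logits = model(x)["out"]     # [1, 4, 256, 256] class scores per pixel
    pred = logits.argmax(dim=1)  # per-pixel class map
print(pred.shape)  # torch.Size([1, 256, 256])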