Automated Grapevine Inflorescence Counting in a Vineyard Using Deep Learning and Multi-object Tracking. Rahim, U. F., Utsumi, T., Iwaki, Y., & Mineno, H. In 2023 15th International Conference on Computer and Automation Engineering (ICCAE 2023), pp. 276-280, Institute of Electrical and Electronics Engineers Inc., 2023. doi: 10.1109/ICCAE56788.2023.10111243

Abstract: To adjust management practices and improve wine marketing strategies, accurate vineyard yield estimation early in the growing season is essential. Conventional methods for yield forecasting rely on manual assessment of phenotypic features, which is time- and labor-intensive and often destructive. We combined a deep object segmentation method, the mask region-based convolutional neural network (Mask R-CNN), with two candidate multi-object tracking algorithms, the simple online and real-time tracking (SORT) and intersection-over-union (IOU) trackers, to develop a complete visual system that automatically detects and tracks individual inflorescences, enabling the number of inflorescences per vineyard row to be assessed from vineyard video footage. The performance of the two tracking algorithms was evaluated on our vineyard dataset, which is more challenging than conventional tracking benchmark datasets owing to environmental factors. The evaluation dataset consists of videos of four vineyard rows covering 221 vines, acquired automatically under unprepared field conditions. We tracked individual inflorescences across video frames with 92.1% multi-object tracking accuracy (MOTA) and an 89.6% identity F1 score (IDF1). This allowed us to estimate the inflorescence count per vineyard row with a 0.91 coefficient of determination (R2) between the estimated count and the manually annotated ground-truth count. The impact of leaf occlusions on inflorescence visibility was lessened by processing multiple successive image frames with minimal displacements to construct multiple camera views. This study demonstrates the use of deep learning and multi-object tracking to create a low-cost (requiring only an RGB camera), high-throughput phenotyping system for precision viticulture.
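The abstract describes a two-stage pipeline: per-frame instance segmentation of inflorescences with Mask R-CNN, followed by multi-object tracking to link detections across frames. The paper's code is not included here; the snippet below is only a minimal sketch of how per-frame detections could be obtained with the off-the-shelf torchvision Mask R-CNN API, assuming a model fine-tuned on an inflorescence class (the COCO weights and the 0.5 score threshold are placeholders, not values from the paper).

import torch
import torchvision
from torchvision.transforms.functional import to_tensor

# Minimal sketch: per-frame detection with torchvision's Mask R-CNN.
# The stock COCO-pretrained weights are a stand-in; the paper fine-tunes
# the model on annotated vineyard images of inflorescences.
model = torchvision.models.detection.maskrcnn_resnet50_fpn(weights="DEFAULT")
model.eval()

def detect_inflorescences(frame_rgb, score_thresh=0.5):
    """frame_rgb: HxWx3 uint8 image. Returns (x1, y1, x2, y2) boxes above the threshold."""
    with torch.no_grad():
        output = model([to_tensor(frame_rgb)])[0]   # dict with boxes, labels, scores, masks
    keep = output["scores"] >= score_thresh          # assumed confidence cut-off
    return [tuple(box) for box in output["boxes"][keep].tolist()]

Only the inference call pattern is shown; the training data, augmentation, and exact detection threshold used in the study are not reproduced here.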
@inproceedings{rahim2023inflorescence,
 title = {Automated Grapevine Inflorescence Counting in a Vineyard Using Deep Learning and Multi-object Tracking},
 author = {Rahim, Umme Fawzia and Utsumi, Tomoyoshi and Iwaki, Yohei and Mineno, Hiroshi},
 booktitle = {2023 15th International Conference on Computer and Automation Engineering (ICCAE 2023)},
 year = {2023},
 pages = {276--280},
 publisher = {Institute of Electrical and Electronics Engineers Inc.},
 doi = {10.1109/ICCAE56788.2023.10111243},
 keywords = {deep learning, high-throughput phenotyping, instance segmentation, multi-object tracking, precision viticulture},
 abstract = {To adjust management practices and improve wine marketing strategies, accurate vineyard yield estimation early in the growing season is essential. Conventional methods for yield forecasting rely on manual assessment of phenotypic features, which is time- and labor-intensive and often destructive. We combined a deep object segmentation method, the mask region-based convolutional neural network (Mask R-CNN), with two candidate multi-object tracking algorithms, the simple online and real-time tracking (SORT) and intersection-over-union (IOU) trackers, to develop a complete visual system that automatically detects and tracks individual inflorescences, enabling the number of inflorescences per vineyard row to be assessed from vineyard video footage. The performance of the two tracking algorithms was evaluated on our vineyard dataset, which is more challenging than conventional tracking benchmark datasets owing to environmental factors. The evaluation dataset consists of videos of four vineyard rows covering 221 vines, acquired automatically under unprepared field conditions. We tracked individual inflorescences across video frames with 92.1% multi-object tracking accuracy (MOTA) and an 89.6% identity F1 score (IDF1). This allowed us to estimate the inflorescence count per vineyard row with a 0.91 coefficient of determination (R2) between the estimated count and the manually annotated ground-truth count. The impact of leaf occlusions on inflorescence visibility was lessened by processing multiple successive image frames with minimal displacements to construct multiple camera views. This study demonstrates the use of deep learning and multi-object tracking to create a low-cost (requiring only an RGB camera), high-throughput phenotyping system for precision viticulture.}
}
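The tracking stage described in the entry's abstract turns the per-frame detections into persistent identities so that each inflorescence is counted once per vineyard row. The sketch below is not the authors' SORT or IOU-tracker implementation; it only illustrates the greedy IoU-association idea behind the IOU tracker, with the 0.3 overlap threshold and the list-of-frames input layout chosen as assumptions for the example.

def iou(a, b):
    """Intersection over union of two (x1, y1, x2, y2) boxes."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter + 1e-9)

def count_inflorescences(frames, iou_thresh=0.3):
    """frames: list of per-frame detection lists, each detection an (x1, y1, x2, y2) box.
    Returns the number of distinct tracks started over the whole video."""
    next_id = 0
    active = {}                        # track id -> last matched box
    for detections in frames:
        matched, unmatched = {}, list(detections)
        for tid, last_box in active.items():
            best, best_iou = None, iou_thresh
            for det in unmatched:      # greedily pick the best-overlapping detection
                overlap = iou(last_box, det)
                if overlap > best_iou:
                    best, best_iou = det, overlap
            if best is not None:
                matched[tid] = best
                unmatched.remove(best)
        for det in unmatched:          # every unmatched detection starts a new track
            matched[next_id] = det
            next_id += 1
        active = matched               # tracks with no match this frame are dropped
    return next_id

For example, count_inflorescences([[(0, 0, 10, 10)], [(1, 0, 11, 10)], [(30, 30, 40, 40)]]) links the first two overlapping boxes into one track and starts a second track for the last box, returning 2. A production tracker, as used in the paper, would additionally filter out very short tracks and tolerate missed detections over a few frames before terminating a track.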
{"_id":"cvr7ebx48S9P7wkjH","bibbaseid":"rahim-utsumi-iwaki-mineno-automatedgrapevineinflorescencecountinginavineyardusingdeeplearningandmultiobjecttracking-2023","author_short":["Rahim, U., F.","Utsumi, T.","Iwaki, Y.","Mineno, H."],"bibdata":{"title":"Automated Grapevine Inflorescence Counting in a Vineyard Using Deep Learning and Multi-object Tracking","type":"article","year":"2023","keywords":"deep learning,high-throughput phenotyping,instance segmentation,multi-object tracking,precision viticulture","pages":"276-280","publisher":"Institute of Electrical and Electronics Engineers Inc.","id":"fa897f4e-6961-34c8-a524-6cbfa9e1e48e","created":"2023-10-27T08:04:57.476Z","accessed":"2023-10-27","file_attached":"true","profile_id":"f1f70cad-e32d-3de2-a3c0-be1736cb88be","group_id":"5ec9cc91-a5d6-3de5-82f3-3ef3d98a89c1","last_modified":"2023-11-06T09:35:27.940Z","read":false,"starred":false,"authored":false,"confirmed":false,"hidden":false,"private_publication":false,"abstract":"To adjust management practices and improve wine marketing strategies, accurate vineyard yield estimation early in the growing season is essential. Conventional methods for yield forecasting rely on phenotypic features' manual assessment, which is time- and labor-intensive and often destructive. We combined a deep object segmentation method, mask region-based convolutional neural network (Mask R-CNN), with two potential multi-object tracking algorithms, simple online and real-time tracking (SORT) and intersection-over-union (IOU) trackers to develop a complete visual system that can automatically detect and track individual inflorescences, enabling the assessment of the number of inflorescences per vineyard row from vineyard video footage. The performance of the two tracking algorithms was evaluated using our vineyard dataset, which is more challenging than conventional tracking benchmark datasets owing to environmental factors. Our evaluation dataset consists of videos of four vineyard rows, including 221 vines that were automatically acquired under unprepared field conditions. We tracked individual inflorescences across video image frames with a 92.1% multi-object tracking accuracy (MOTA) and an 89.6% identity F1 score (IDF1). This allowed us to estimate inflorescence count per vineyard row with a 0.91 coefficient of determination (R2) between the estimated count and manual-annotated ground truth count. The impact of leaf occlusions on inflorescence visibility was lessened by processing multiple successive image frames with minimal displacements to construct multiple camera views. 
This study demonstrates the use of deep learning and multi-object tracking in creating a low-cost (requiring only an RGB camera), high-throughput phenotyping system for precision viticulture.","bibtype":"article","author":"Rahim, Umme Fawzia and Utsumi, Tomoyoshi and Iwaki, Yohei and Mineno, Hiroshi","doi":"10.1109/ICCAE56788.2023.10111243","journal":"2023 15th International Conference on Computer and Automation Engineering, ICCAE 2023","bibtex":"@article{\n title = {Automated Grapevine Inflorescence Counting in a Vineyard Using Deep Learning and Multi-object Tracking},\n type = {article},\n year = {2023},\n keywords = {deep learning,high-throughput phenotyping,instance segmentation,multi-object tracking,precision viticulture},\n pages = {276-280},\n publisher = {Institute of Electrical and Electronics Engineers Inc.},\n id = {fa897f4e-6961-34c8-a524-6cbfa9e1e48e},\n created = {2023-10-27T08:04:57.476Z},\n accessed = {2023-10-27},\n file_attached = {true},\n profile_id = {f1f70cad-e32d-3de2-a3c0-be1736cb88be},\n group_id = {5ec9cc91-a5d6-3de5-82f3-3ef3d98a89c1},\n last_modified = {2023-11-06T09:35:27.940Z},\n read = {false},\n starred = {false},\n authored = {false},\n confirmed = {false},\n hidden = {false},\n private_publication = {false},\n abstract = {To adjust management practices and improve wine marketing strategies, accurate vineyard yield estimation early in the growing season is essential. Conventional methods for yield forecasting rely on phenotypic features' manual assessment, which is time- and labor-intensive and often destructive. We combined a deep object segmentation method, mask region-based convolutional neural network (Mask R-CNN), with two potential multi-object tracking algorithms, simple online and real-time tracking (SORT) and intersection-over-union (IOU) trackers to develop a complete visual system that can automatically detect and track individual inflorescences, enabling the assessment of the number of inflorescences per vineyard row from vineyard video footage. The performance of the two tracking algorithms was evaluated using our vineyard dataset, which is more challenging than conventional tracking benchmark datasets owing to environmental factors. Our evaluation dataset consists of videos of four vineyard rows, including 221 vines that were automatically acquired under unprepared field conditions. We tracked individual inflorescences across video image frames with a 92.1% multi-object tracking accuracy (MOTA) and an 89.6% identity F1 score (IDF1). This allowed us to estimate inflorescence count per vineyard row with a 0.91 coefficient of determination (R2) between the estimated count and manual-annotated ground truth count. The impact of leaf occlusions on inflorescence visibility was lessened by processing multiple successive image frames with minimal displacements to construct multiple camera views. 
This study demonstrates the use of deep learning and multi-object tracking in creating a low-cost (requiring only an RGB camera), high-throughput phenotyping system for precision viticulture.},\n bibtype = {article},\n author = {Rahim, Umme Fawzia and Utsumi, Tomoyoshi and Iwaki, Yohei and Mineno, Hiroshi},\n doi = {10.1109/ICCAE56788.2023.10111243},\n journal = {2023 15th International Conference on Computer and Automation Engineering, ICCAE 2023}\n}","author_short":["Rahim, U., F.","Utsumi, T.","Iwaki, Y.","Mineno, H."],"urls":{"Paper":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c/file/3458adc6-0db6-7e31-32d6-dcc6991b171a/full_text.pdf.pdf"},"biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","bibbaseid":"rahim-utsumi-iwaki-mineno-automatedgrapevineinflorescencecountinginavineyardusingdeeplearningandmultiobjecttracking-2023","role":"author","keyword":["deep learning","high-throughput phenotyping","instance segmentation","multi-object tracking","precision viticulture"],"metadata":{"authorlinks":{}},"downloads":0},"bibtype":"article","biburl":"https://bibbase.org/service/mendeley/bfbbf840-4c42-3914-a463-19024f50b30c","dataSources":["2252seNhipfTmjEBQ"],"keywords":["deep learning","high-throughput phenotyping","instance segmentation","multi-object tracking","precision viticulture"],"search_terms":["automated","grapevine","inflorescence","counting","vineyard","using","deep","learning","multi","object","tracking","rahim","utsumi","iwaki","mineno"],"title":"Automated Grapevine Inflorescence Counting in a Vineyard Using Deep Learning and Multi-object Tracking","year":2023}