Generated by bibbase.org
You can embed this publication list in an existing web page by copying and pasting any of the following snippets.
JavaScript (easiest)

<script src="https://bibbase.org/show?bib=http%3A%2F%2Fvclab.science.uoit.ca%2Ffaisal-qureshi.bib&jsonp=1&css=bibbase.org/css/styles/default.css"></script>
PHP

<?php
$contents = file_get_contents("https://bibbase.org/show?bib=http%3A%2F%2Fvclab.science.uoit.ca%2Ffaisal-qureshi.bib&jsonp=1&css=bibbase.org/css/styles/default.css");
print_r($contents);
?>
iFrame (not recommended)

<iframe src="https://bibbase.org/show?bib=http%3A%2F%2Fvclab.science.uoit.ca%2Ffaisal-qureshi.bib&jsonp=1&css=bibbase.org/css/styles/default.css"></iframe>
For more details see the documentation.
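The PHP snippet above simply fetches the rendered list from bibbase.org on the server side. The same approach works in any language; here is a minimal Python equivalent (assuming the third-party requests package), fetching the same URL used in the snippets above.

# Minimal server-side embedding sketch in Python (assumes the
# third-party `requests` package; any HTTP client would do).
import requests

BIBBASE_URL = (
    "https://bibbase.org/show"
    "?bib=http%3A%2F%2Fvclab.science.uoit.ca%2Ffaisal-qureshi.bib"
    "&jsonp=1&css=bibbase.org/css/styles/default.css"
)

html = requests.get(BIBBASE_URL, timeout=30).text  # rendered publication list
print(html)  # or splice into your page template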

2024 (1)
Hyperspectral Pixel Unmixing with Latent Dirichlet Variational Autoencoder.
Mantripragada, K.; and Qureshi, F. Z.
IEEE Transactions on Geoscience and Remote Sensing, 13pp, 2024.

@Article{24-ieee-igrs-j,
  author = {Mantripragada, Kiran and Qureshi, Faisal Z.},
  journal = {IEEE Transactions on Geoscience and Remote Sensing},
  title = {Hyperspectral Pixel Unmixing with Latent Dirichlet Variational Autoencoder},
  year = {2024},
  pages = {13pp},
  abstract = {We present a method for hyperspectral pixel unmixing. The proposed method assumes that (1) abundances can be encoded as Dirichlet distributions and (2) spectra of endmembers can be represented as multivariate Normal distributions. The method solves the problem of abundance estimation and endmember extraction within a variational autoencoder setting where a Dirichlet bottleneck layer models the abundances, and the decoder performs endmember extraction. The proposed method can also leverage the transfer learning paradigm, where the model is trained only on synthetic data containing pixels that are linear combinations of one or more endmembers of interest. In this case, we retrieve endmembers (spectra) from the United States Geological Survey Spectral Library. The model thus trained can be subsequently used to perform pixel unmixing on “real data” that contains a subset of the endmembers used to generate the synthetic data. The model achieves state-of-the-art results on several benchmarks: Cuprite, Urban Hydice and Samson. We also present a new synthetic dataset, OnTech-HSI-Syn-21, that can be used to study hyperspectral pixel unmixing methods. We showcase the transfer learning capabilities of the proposed model on the Cuprite and OnTech-HSI-Syn-21 datasets. In summary, the proposed method can be applied to pixel unmixing in a variety of domains, including agriculture, forestry, mineralogy, analysis of materials, healthcare, etc. Additionally, the proposed method eschews the need for labelled data for training by leveraging the transfer learning paradigm, where the model is trained on synthetic data generated using the endmembers present in the “real” data.},
  doi = {10.1109/TGRS.2024.3357589},
  keywords = {hyperspectral-unmixing},
  url_Paper = {pubs/24-ieee-tgrs-j.pdf}
}
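The abstract above describes a variational autoencoder whose Dirichlet bottleneck models abundances and whose decoder performs endmember extraction under a linear mixing model. Below is a minimal PyTorch sketch of that general architecture; it is not the authors' implementation, and the layer sizes and loss are illustrative assumptions.

# Minimal sketch (not the authors' code) of a Dirichlet-bottleneck
# autoencoder for linear pixel unmixing. The encoder predicts Dirichlet
# concentration parameters (abundances); the decoder is a linear mixing
# layer whose weight matrix plays the role of the endmember spectra.
import torch
import torch.nn as nn

class DirichletUnmixer(nn.Module):
    def __init__(self, n_bands, n_endmembers):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(n_bands, 64), nn.ReLU(),
            nn.Linear(64, n_endmembers), nn.Softplus())  # concentrations > 0
        # Endmember matrix (n_endmembers x n_bands), learned by the decoder.
        self.endmembers = nn.Parameter(torch.rand(n_endmembers, n_bands))

    def forward(self, pixels):
        alpha = self.encoder(pixels) + 1e-3        # Dirichlet parameters
        q = torch.distributions.Dirichlet(alpha)
        abundances = q.rsample()                   # reparameterized sample
        recon = abundances @ self.endmembers       # linear mixing model
        return recon, abundances, q

model = DirichletUnmixer(n_bands=200, n_endmembers=4)
x = torch.rand(8, 200)                             # 8 stand-in pixels
recon, ab, q = model(x)
loss = ((recon - x) ** 2).mean()                   # plus a KL term in practice
loss.backward()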
2023 (5)
An interactive threshold-setting procedure for improved multivariate anomaly detection in time series.
Lundström, A.; O'Nils, M.; and Qureshi, F. Z.
IEEE Access, 11pp, 2023.

@Article{23-ieee-access-j,
  author = {Lundström, Adam and O'Nils, Mattias and Qureshi, Faisal Z.},
  journal = {IEEE Access},
  title = {An interactive threshold-setting procedure for improved multivariate anomaly detection in time series},
  year = {2023},
  pages = {11pp},
  abstract = {Anomaly detection in multivariate time series is valuable for many applications. In this context, unsupervised and semi-supervised deep learning methods that estimate how normal a new observation is have shown promising results on benchmark datasets. These methods depend on a threshold that determines which points should be regarded as anomalous and which should not. However, finding the optimal threshold is not easy since no information about the ground truth is known in advance, which implies that there are limitations to automatic threshold-setting methods available today. An alternative is to utilize the expertise of users that can interact in a threshold-setting procedure, but for this to be practically feasible, the method needs to be both accurate and efficient in relation to the state-of-the-art automatic methods. Therefore, this study develops an interactive threshold-setting schema and examines to what extent it can outperform the current state-of-the-art automatic threshold-setting methods. The result of the study strongly indicates that the suggested method can, with little effort, provide higher accuracy than the automatic threshold-setting methods on a general basis.},
  doi = {10.1109/ACCESS.2023.3310653},
  issn = {2169-3536},
  keywords = {anomaly-scoring},
  url_Paper = {pubs/23-ieee-access-j.pdf}
}
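This paper is about choosing the threshold that turns continuous anomaly scores into anomalous/normal decisions. The toy sketch below illustrates why that choice matters, using a simple percentile rule as a stand-in; it is a baseline illustration, not the interactive procedure proposed in the paper.

# Toy baseline: flag points whose anomaly score exceeds a percentile
# threshold. The percentile is the kind of knob an interactive procedure
# lets a user tune; this is not the paper's method.
import numpy as np

rng = np.random.default_rng(0)
scores = rng.normal(0.0, 1.0, 1000)   # stand-in model-produced anomaly scores
scores[::100] += 6.0                  # inject 10 obvious anomalies

for pct in (95.0, 99.0, 99.9):
    thr = np.percentile(scores, pct)
    flagged = (scores > thr).sum()
    print(f"threshold at {pct}th percentile = {thr:.2f} -> {flagged} flagged")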
Error Estimation for Single-Image Human Body Mesh Reconstruction.
Jafarian, H.; and Qureshi, F.
arXiv cs.CV 2305.17245, 2023.

@Misc{23-arxiv-mesh-error,
  title = {Error Estimation for Single-Image Human Body Mesh Reconstruction},
  author = {H. Jafarian and F.Z. Qureshi},
  howpublished = {arXiv cs.CV 2305.17245},
  year = {2023},
  eprint = {2305.17245},
  archivePrefix = {arXiv},
  primaryClass = {cs.CV},
  keywords = {mesh-error},
  url_Paper = {pubs/23-arxiv-mesh-error.pdf}
}
An Iterative Method for Hyperspectral Pixel Unmixing Leveraging Latent Dirichlet Variational Autoencoder.
Mantripragada, K.; Adler, P.; Olsen, P.; and Qureshi, F.
International Geoscience and Remote Sensing Symposium (IGARSS), July 2023.

@Misc{23-igarss-paper,
  title = {An Iterative Method for Hyperspectral Pixel Unmixing Leveraging Latent Dirichlet Variational Autoencoder},
  author = {K. Mantripragada and P.R. Adler and P.A. Olsen and F.Z. Qureshi},
  year = {2023},
  month = {July},
  pages = {4pp},
  howpublished = {International Geoscience and Remote Sensing Symposium (IGARSS)},
  keywords = {iterative-ldvae},
  url_Paper = {pubs/23-igarss-paper.pdf}
}
An Iterative Method for Hyperspectral Pixel Unmixing Leveraging Latent Dirichlet Variational Autoencoder.
Mantripragada, K.; Adler, P.; Olsen, P.; and Qureshi, F.
International Geoscience and Remote Sensing Symposium (IGARSS) Abstracts, May 2023.

@Misc{23-igarss-abstract,
  title = {An Iterative Method for Hyperspectral Pixel Unmixing Leveraging Latent Dirichlet Variational Autoencoder},
  author = {K. Mantripragada and P.R. Adler and P.A. Olsen and F.Z. Qureshi},
  year = {2023},
  month = {May},
  pages = {2pp},
  howpublished = {International Geoscience and Remote Sensing Symposium (IGARSS) Abstracts},
  keywords = {iterative-ldvae},
  url_Paper = {pubs/23-igarss-abstract.pdf}
}
Hyperspectral Image Compression Using Implicit Neural Representations.
Rezasoltani, S.; and Qureshi, F.
In Proc. 20th Conference on Robots and Vision (CRV23), pages 8pp, Montreal, Jun 2023.

@InProceedings{23-crv-c,
  author = {S. Rezasoltani and F.Z. Qureshi},
  title = {Hyperspectral Image Compression Using Implicit Neural Representations},
  booktitle = {Proc. 20th Conference on Robots and Vision (CRV23)},
  year = {2023},
  month = {Jun},
  address = {Montreal},
  pages = {8pp},
  keywords = {hsi-inr-compression},
  url_Paper = {pubs/23-crv-c.pdf}
}
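As the title indicates, implicit-neural-representation compression stores an image as the weights of a small network mapping pixel coordinates to values; for a hyperspectral cube the output is a full spectrum per pixel. A minimal sketch of that general idea follows (sizes and training loop are illustrative assumptions, not the paper's architecture).

# Minimal implicit-neural-representation sketch: a small MLP maps a
# normalized (x, y) coordinate to a spectrum with n_bands entries.
# Compression amounts to storing these weights instead of the raw cube.
import torch
import torch.nn as nn

n_bands = 200
inr = nn.Sequential(
    nn.Linear(2, 128), nn.ReLU(),
    nn.Linear(128, 128), nn.ReLU(),
    nn.Linear(128, n_bands))

cube = torch.rand(32, 32, n_bands)            # stand-in hyperspectral cube
ys, xs = torch.meshgrid(torch.linspace(0, 1, 32),
                        torch.linspace(0, 1, 32), indexing="ij")
coords = torch.stack([xs, ys], dim=-1).reshape(-1, 2)
target = cube.reshape(-1, n_bands)

opt = torch.optim.Adam(inr.parameters(), lr=1e-3)
for step in range(200):                       # overfit the network to the cube
    opt.zero_grad()
    loss = ((inr(coords) - target) ** 2).mean()
    loss.backward()
    opt.step()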
2022 (2)
Improving deep learning based anomaly detection on multivariate time series through separated anomaly scoring.
Lundstrom, A.; O'Nils, M.; Qureshi, F. Z.; and Jantsch, A.
IEEE Access, 10: 11pp, Oct 2022.

@Article{22-ieee-access-j,
  author = {Lundstrom, Adam and O'Nils, Mattias and Qureshi, Faisal Z. and Jantsch, Axel},
  title = {Improving deep learning based anomaly detection on multivariate time series through separated anomaly scoring},
  journal = {IEEE Access},
  year = {2022},
  volume = {10},
  pages = {11pp},
  month = {Oct},
  doi = {10.1109/ACCESS.2022.3213038},
  keywords = {anomaly-scoring},
  url_Paper = {pubs/22-ieee-access-j.pdf}
}
The Effects of Spectral Dimensionality Reduction on Hyperspectral Pixel Classification: A Case Study.
Mantripragada, K.; Dao, P. D.; He, Y.; and Qureshi, F. Z.
PLOS ONE, 17(7): 24pp, July 2022.

@Article{22-plos-one-j,
  author = {Mantripragada, Kiran and Dao, Phuong D. and He, Yuhong and Qureshi, Faisal Z.},
  title = {The Effects of Spectral Dimensionality Reduction on Hyperspectral Pixel Classification: A Case Study},
  journal = {PLOS ONE},
  year = {2022},
  volume = {17},
  number = {7},
  pages = {24pp},
  month = {July},
  doi = {10.1371/journal.pone.0269174},
  keywords = {hsi},
  url_Paper = {pubs/22-plos-one-j.pdf}
}
2021 (3)
Evaluation of 2D-/3D-Feet-Detection Methods for Semi-Autonomous Powered Wheelchair Navigation.
Giménez, C. V.; Krug, S.; Qureshi, F. Z.; and O’Nils, M.
Journal of Imaging, 7(12). 2021.

@Article{21-jimaging-j2,
  author = {Giménez, Cristian Vilar and Krug, Silvia and Qureshi, Faisal Z. and O’Nils, Mattias},
  title = {Evaluation of 2D-/3D-Feet-Detection Methods for Semi-Autonomous Powered Wheelchair Navigation},
  journal = {Journal of Imaging},
  year = {2021},
  volume = {7},
  number = {12},
  article-number = {255},
  issn = {2313-433X},
  url = {https://www.mdpi.com/2313-433X/7/12/255},
  abstract = {Powered wheelchairs have enhanced the mobility and quality of life of people with special needs. The next step in the development of powered wheelchairs is to incorporate sensors and electronic systems for new control applications and capabilities to improve their usability and the safety of their operation, such as obstacle avoidance or autonomous driving. However, autonomous powered wheelchairs require safe navigation in different environments and scenarios, making their development complex. In our research, we propose, instead, to develop contactless control for powered wheelchairs where the position of the caregiver is used as a control reference. Hence, we used a depth camera to recognize the caregiver and measure at the same time their relative distance from the powered wheelchair. In this paper, we compared two different approaches for real-time object recognition using a 3DHOG hand-crafted object descriptor based on a 3D extension of the histogram of oriented gradients (HOG) and a convolutional neural network based on YOLOv4-Tiny. To evaluate both approaches, we constructed Miun-Feet—a custom dataset of images of labeled caregiver’s feet in different scenarios, with backgrounds, objects, and lighting conditions. The experimental results showed that the YOLOv4-Tiny approach outperformed 3DHOG in all the analyzed cases. In addition, the results showed that the recognition accuracy was not improved using the depth channel, enabling the use of a monocular RGB camera only instead of a depth camera and reducing the computational cost and heat dissipation limitations. Hence, the paper proposes an additional method to compute the caregiver’s distance and angle from the Powered Wheelchair (PW) using only the RGB data. This work shows that it is feasible to use the location of the caregiver’s feet as a control signal for the control of a powered wheelchair and that it is possible to use a monocular RGB camera to compute their relative positions.},
  doi = {10.3390/jimaging7120255},
  keywords = {feet-localization},
  url_Paper = {pubs/21-jimaging-j2.pdf}
}
A Temporal Boosted YOLO-Based Model for Birds Detection around Wind Farms.
Alqaysi, H.; Fedorov, I.; Qureshi, F. Z.; and O’Nils, M.
Journal of Imaging, 7(11): 13pp. 2021.

@Article{21-jimaging-j,
  author = {Alqaysi, Hiba and Fedorov, Igor and Qureshi, Faisal Z. and O’Nils, Mattias},
  title = {A Temporal Boosted YOLO-Based Model for Birds Detection around Wind Farms},
  journal = {Journal of Imaging},
  year = {2021},
  volume = {7},
  number = {11},
  pages = {13pp},
  article-number = {227},
  issn = {2313-433X},
  url = {https://www.mdpi.com/2313-433X/7/11/227},
  abstract = {Object detection for sky surveillance is a challenging problem due to having small objects in a large volume and a constantly changing background which requires high resolution frames. For example, detecting flying birds in wind farms to prevent their collision with the wind turbines. This paper proposes a YOLOv4-based ensemble model for bird detection in grayscale videos captured around wind turbines in wind farms. In order to tackle this problem, we introduce two datasets—(1) Klim and (2) Skagen—collected at two locations in Denmark. We use the Klim training set to train three increasingly capable YOLOv4 based models. Model 1 uses YOLOv4 trained on the Klim dataset, Model 2 introduces tiling to improve small bird detection, and the last model uses tiling and temporal stacking and achieves the best mAP values on both Klim and Skagen datasets. We used this model to set up an ensemble detector, which further improves mAP values on both datasets. The three models achieve testing mAP values of 82%, 88%, and 90% on the Klim dataset. mAP values for Model 1 and Model 3 on the Skagen dataset are 60% and 92%. Improving object detection accuracy could mitigate birds’ mortality rate by choosing the locations for such establishment and the turbines location. It can also be used to improve the collision avoidance systems used in wind energy facilities.},
  doi = {10.3390/jimaging7110227},
  keywords = {bird-detection},
  url_Paper = {pubs/21-jimaging-j.pdf}
}
Improving hyperspectral image segmentation by applying inverse noise weighting and outlier removal for optimal scale selection.
Dao, P. D.; Mantripragada, K.; He, Y.; and Qureshi, F. Z.
ISPRS Journal of Photogrammetry and Remote Sensing, 171: 348–366. 2021.

@Article{20-photo-j,
  author = {Phuong D. Dao and Kiran Mantripragada and Yuhong He and Faisal Z. Qureshi},
  title = {Improving hyperspectral image segmentation by applying inverse noise weighting and outlier removal for optimal scale selection},
  journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
  year = {2021},
  volume = {171},
  pages = {348--366},
  issn = {0924-2716},
  doi = {10.1016/j.isprsjprs.2020.11.013},
  url = {http://www.sciencedirect.com/science/article/pii/S0924271620303208},
  abstract = {Optimal scale selection for image segmentation is an essential component of the Object-Based Image Analysis (OBIA) and interpretation. An optimal segmentation scale is a scale at which image objects, overall, best represent real-world ground objects and features across the entire image. At this scale, the intra-object variance is ideally lowest and the inter-object spatial autocorrelation is ideally highest, and a change in the scale could cause an abrupt change in these measures. Unsupervised parameter optimization methods typically use global measures of spatial and spectral properties calculated from all image objects in all bands as the target criteria to determine the optimal segmentation scale. However, no studies consider the effect of noise in image spectral bands on the segmentation assessment and scale selection. Furthermore, these global measures could be affected by outliers or extreme values from a small number of objects. These issues may lead to incorrect assessment and selection of optimal scales and cause the uncertainties in subsequent segmentation and classification results. These issues become more pronounced when segmenting hyperspectral data with large spectral variability across the spectrum. In this study, we propose an enhanced method that 1) incorporates the band’s inverse noise weighting in the segmentation and 2) detects and removes outliers before determining segmentation scale parameters. The proposed method is evaluated on three well-established segmentation approaches – k-means, mean-shift, and watershed. The generated segments are validated by comparing them with reference polygons using normalized over-segmentation (OS), under-segmentation (US), and the Euclidean Distance (ED) indices. The results demonstrate that this proposed scale selection method produces more accurate and reliable segmentation results. The approach can be applied to other segmentation selection criteria and are useful for automatic multi-parameter tuning and optimal scale parameter selections in OBIA methods in remote sensing.},
  keywords = {hyperspectral-segmentation}
}
2020 (2)
A Stream Algebra for Performance Optimization of Large Scale Computer Vision Pipelines.
Helala, M. A.; Qureshi, F. Z.; and Pu, K. Q.
IEEE Transactions on Pattern Analysis and Machine Intelligence. 2020.

@Article{20-pami-j,
  author = {Mohamed A. Helala and Faisal Z. Qureshi and Ken Q. Pu},
  title = {A Stream Algebra for Performance Optimization of Large Scale Computer Vision Pipelines},
  journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  year = {2020},
  doi = {10.1109/TPAMI.2020.3015867},
  keywords = {stream-algebra},
  url_Paper = {pubs/20-pami-j.pdf}
}
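The stream algebra named in the title treats a vision pipeline as composable operators over streams. The generic Python sketch below illustrates that compositional style with plain generators; it is only an illustration of the idea, not the paper's algebra or its performance optimizations.

# Generic stream-operator style: each stage consumes and yields a stream,
# so stages compose like algebraic operators. Illustrative only.
def source(n):
    for i in range(n):
        yield {"frame_id": i}                 # stand-in for a video frame

def op_map(stream, fn):
    for item in stream:
        yield fn(item)

def op_filter(stream, pred):
    for item in stream:
        if pred(item):
            yield item

pipeline = op_filter(
    op_map(source(10), lambda f: {**f, "score": f["frame_id"] * 0.1}),
    lambda f: f["score"] > 0.5)

for frame in pipeline:
    print(frame)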
A Residual-Dyad Encoder Discriminator Network for Remote Sensing Image Matching.
Khurshid, N.; Mohbat; Taj, M.; and Qureshi, F.
IEEE Transactions on Geoscience and Remote Sensing, 58(3): 2001–2014. March 2020.

@Article{19-tgrs-j,
  author = {N. Khurshid and Mohbat and M. Taj and F.Z. Qureshi},
  title = {A Residual-Dyad Encoder Discriminator Network for Remote Sensing Image Matching},
  journal = {IEEE Transactions on Geoscience and Remote Sensing},
  year = {2020},
  month = {March},
  volume = {58},
  number = {3},
  pages = {2001--2014},
  doi = {10.1109/TGRS.2019.2951820},
  issn = {1558-0644},
  print_issn = {0196-2892},
  keywords = {residual-dyad},
  url_Paper = {pubs/19-tgrs-j.pdf}
}
2019 (2)
Structure Guided Image Inpainting using Edge Prediction.
Nazeri, K.; Ng, E.; Joseph, T.; Qureshi, F.; and Ebrahimi, M.
In Proc. International Conference on Computer Vision Workshop on Advances in Image Manipulation (AIM19), pages 10pp, Seoul, Nov 2019.

@InProceedings{19-aim-w,
  author = {K. Nazeri and E. Ng and T. Joseph and F.Z. Qureshi and M. Ebrahimi},
  title = {Structure Guided Image Inpainting using Edge Prediction},
  booktitle = {Proc. International Conference on Computer Vision Workshop on Advances in Image Manipulation (AIM19)},
  year = {2019},
  month = {Nov},
  address = {Seoul},
  pages = {10pp},
  url_link = {https://arxiv.org/abs/1901.00212},
  url_Paper = {pubs/19-aim-w.pdf},
  keywords = {edge-connect}
}
Joint Spatial and Layer Attention for Convolutional Networks.
Joseph, T.; Derpanis, K.; and Qureshi, F.
In Proc. 30th British Machine Vision Conference (BMVC19), pages 14pp, Cardiff, Sep 2019.

@InProceedings{19-bmvc-c,
  author = {T. Joseph and K.G. Derpanis and F.Z. Qureshi},
  title = {Joint Spatial and Layer Attention for Convolutional Networks},
  booktitle = {Proc. 30th British Machine Vision Conference (BMVC19)},
  year = {2019},
  month = {Sep},
  address = {Cardiff},
  pages = {14pp},
  url_link = {https://arxiv.org/abs/1901.05376},
  url_Paper = {pubs/19-bmvc-c.pdf},
  keywords = {uan}
}
2018 (3)
Compact Neural Network Solutions to Laplace's Equation in a Nanofluidic Device.
Magill, M.; Qureshi, F.; and de Haan, H.
In NeurIPS 18 Workshop on Compact Deep Neural Networks with Industrial Applications, pages 5pp, Montreal, Dec 2018.

@InProceedings{18-cdnn-w,
  author = {M. Magill and F.Z. Qureshi and H.W. de Haan},
  title = {Compact Neural Network Solutions to Laplace's Equation in a Nanofluidic Device},
  booktitle = {NeurIPS 18 Workshop on Compact Deep Neural Networks with Industrial Applications},
  year = {2018},
  month = {Dec},
  address = {Montreal},
  pages = {5pp},
  url_Paper = {pubs/18-cdnnria-w.pdf},
  keywords = {dnn}
}
Neural Networks Trained to Solve Differential Equations Learn General Representations.
Magill, M.; Qureshi, F.; and de Haan, H.
In Proc. The Thirty-second Annual Conference on Neural Information Processing Systems (NeurIPS 18), pages 11pp, Montreal, Dec 2018.

@InProceedings{18-nips-c,
  author = {M. Magill and F.Z. Qureshi and H.W. de Haan},
  title = {Neural Networks Trained to Solve Differential Equations Learn General Representations},
  booktitle = {Proc. The Thirty-second Annual Conference on Neural Information Processing Systems (NeurIPS 18)},
  year = {2018},
  month = {Dec},
  address = {Montreal},
  pages = {11pp},
  url_Paper = {pubs/18-nips-c.pdf},
  url_link = {https://papers.nips.cc/paper/7662-neural-networks-trained-to-solve-differential-equations-learn-general-representations},
  keywords = {dnn}
}
Real-time Video Summarization on Commodity Hardware.
Taylor, W.; and Qureshi, F.
In Proc. 12th ACM International Conference on Distributed Smart Cameras (ICDSC 18), pages 8pp, Eindhoven, September 2018.

@InProceedings{18-icdsc-c,
  author = {W. Taylor and F.Z. Qureshi},
  title = {Real-time Video Summarization on Commodity Hardware},
  booktitle = {Proc. 12th ACM International Conference on Distributed Smart Cameras (ICDSC 18)},
  year = {2018},
  month = {September},
  address = {Eindhoven},
  pages = {8pp},
  url_Paper = {pubs/18-icdsc-c.pdf},
  keywords = {video-summarization-icdsc}
}
2017 (2)
Fast Estimation of Large Displacement Optical Flow Using Dominant Motion Patterns & Sub-Volume PatchMatch Filtering.
Helala, M.; and Qureshi, F.
In Proc. 14th Conference on Computer and Robot Vision (CRV 17), pages 8pp, Edmonton, May 2017.
Best Computer Vision Paper

@InProceedings{17-crv-c-opticalflow,
  author = {M.A. Helala and F.Z. Qureshi},
  title = {Fast Estimation of Large Displacement Optical Flow Using Dominant Motion Patterns \& Sub-Volume PatchMatch Filtering},
  booktitle = {Proc. 14th Conference on Computer and Robot Vision (CRV 17)},
  year = {2017},
  month = {May},
  address = {Edmonton},
  pages = {8pp},
  note = {Best Computer Vision Paper},
  url_Paper = {pubs/17-crv-c-opticalflow.pdf},
  keywords = {cost-volume-optical-flow}
}
An Index Structure for Fast Range Search in Hamming Space.
Reina, E.; Pu, K.; and Qureshi, F.
In Proc. 14th Conference on Computer and Robot Vision (CRV 17), pages 8pp, Edmonton, May 2017.

@InProceedings{17-crv-c-trie,
  author = {E.M. Reina and K.Q. Pu and F.Z. Qureshi},
  title = {An Index Structure for Fast Range Search in Hamming Space},
  booktitle = {Proc. 14th Conference on Computer and Robot Vision (CRV 17)},
  year = {2017},
  month = {May},
  address = {Edmonton},
  pages = {8pp},
  keywords = {trie-indexing},
  url_Paper = {pubs/17-crv-c-trie.pdf}
}
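Range search in Hamming space returns all stored binary codes within distance r of a query. The sketch below is the brute-force baseline such an index is designed to beat; the trie-based structure proposed in the paper is not reproduced here.

# Brute-force Hamming range search over binary codes using popcount.
# The paper's contribution is an index (a trie) that avoids scanning
# every code; this linear scan is the baseline it improves on.
def hamming(a, b):
    return bin(a ^ b).count("1")              # popcount of the XOR

def range_search(codes, query, r):
    return [c for c in codes if hamming(c, query) <= r]

codes = [0b1010_1100, 0b1010_1101, 0b0110_0000, 0b1111_1111]
print(range_search(codes, 0b1010_1100, r=1))  # -> [172, 173]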
2016 (3)
A Formal Algebra Implementation for Distributed Image and Video Stream Processing.
Helala, M.; Pu, K.; and Qureshi, F.
In Proc. 10th International Conference on Distributed Smart Cameras (ICDSC 16), pages 8pp, Paris, Sep 2016.

@InProceedings{16-icdsc-c,
  author = {M.A. Helala and K.Q. Pu and F.Z. Qureshi},
  title = {A Formal Algebra Implementation for Distributed Image and Video Stream Processing},
  booktitle = {Proc. 10th International Conference on Distributed Smart Cameras (ICDSC 16)},
  year = {2016},
  month = {Sep},
  address = {Paris},
  pages = {8pp},
  keywords = {stream-algebra},
  url_Paper = {pubs/16-icdsc-c.pdf}
}
Constructing Image Mosaics Using Focus Based Depth Analysis.
Helala, M.; and Qureshi, F.
In Proc. IEEE Winter Applications of Computer Vision Conference (WACV 16), pages 7pp, Lake Placid, Mar 2016.

@InProceedings{16-wacv-2-c,
  author = {M.A. Helala and F.Z. Qureshi},
  title = {Constructing Image Mosaics Using Focus Based Depth Analysis},
  booktitle = {Proc. IEEE Winter Applications of Computer Vision Conference (WACV 16)},
  year = {2016},
  month = {Mar},
  address = {Lake Placid},
  pages = {7pp},
  keywords = {aerial-mosaics},
  url_Paper = {pubs/16-wacv-2-c.pdf}
}
Automatic Video Editing for Sensor-Rich Videos.
Taylor, W.; and Qureshi, F.
In Proc. IEEE Winter Applications of Computer Vision Conference (WACV 16), pages 9pp, Lake Placid, Mar 2016.

@InProceedings{16-wacv-c,
  author = {W. Taylor and F.Z. Qureshi},
  title = {Automatic Video Editing for Sensor-Rich Videos},
  booktitle = {Proc. IEEE Winter Applications of Computer Vision Conference (WACV 16)},
  year = {2016},
  month = {Mar},
  address = {Lake Placid},
  pages = {9pp},
  keywords = {video-summarization-wacv},
  url_Paper = {pubs/16-wacv-c.pdf}
}
2015 (2)
Automatic Parsing of Lane and Road Boundaries in Challenging Traffic Scenes.
Helala, M.; Qureshi, F.; and Pu, K.
SPIE Journal of Electronic Imaging, 24(5): 15pp. 2015.

@Article{15-jei-j,
  author = {M. Helala and F.Z. Qureshi and K.Q. Pu},
  title = {Automatic Parsing of Lane and Road Boundaries in Challenging Traffic Scenes},
  journal = {SPIE Journal of Electronic Imaging},
  year = {2015},
  volume = {24},
  number = {5},
  pages = {15pp},
  url_Paper = {pubs/15-jei-j.pdf},
  keywords = {road-boundary}
}
Stereo Reconstruction of Droplet Flight Trajectories.
Zarrabeitia, L.; Qureshi, F.; and Aruliah, D.
IEEE Transactions on Pattern Analysis and Machine Intelligence, 37(4): 847–861. 2015.

@Article{14-pami-j,
  author = {L.A. Zarrabeitia and F.Z. Qureshi and D.A. Aruliah},
  title = {Stereo Reconstruction of Droplet Flight Trajectories},
  journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
  year = {2015},
  volume = {37},
  number = {4},
  pages = {847--861},
  doi = {10.1109/TPAMI.2014.2353638},
  issn = {0162-8828},
  url_Link = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6888516},
  url_Paper = {pubs/14-pami-j.pdf}
}
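The reconstructed droplet trajectories are ballistic, so each coordinate is quadratic in time. The NumPy sketch below fits that trajectory model to noisy 3D points by least squares; it illustrates the motion model only, not the paper's stereo reconstruction method.

# Fit x(t), y(t), z(t) = a + b*t + c*t^2 to noisy 3D samples by least
# squares; the quadratic term in the vertical axis estimates gravity.
# Illustrative only; the paper's contribution is the stereo pipeline.
import numpy as np

t = np.linspace(0.0, 0.5, 50)
true = np.stack([2.0 * t, 1.0 * t, 3.0 * t - 4.9 * t**2], axis=1)
pts = true + np.random.default_rng(0).normal(0, 0.01, true.shape)

A = np.stack([np.ones_like(t), t, t**2], axis=1)   # design matrix
coef, *_ = np.linalg.lstsq(A, pts, rcond=None)     # rows: a, b, c per axis
print("estimated gravity ~", 2 * coef[2, 2])       # ~ -9.8 m/s^2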
2014 (6)
Towards Efficient Feedback Control in Streaming Computer Vision Pipelines.
Helala, M.; Pu, K.; and Qureshi, F.
In Proc. Workshop on User-Centered Computer Vision (co-located with ACCV'14), pages 16pp, Singapore, Nov 2014.

@InProceedings{14-uccv-w,
  author = {M.A. Helala and K.Q. Pu and F.Z. Qureshi},
  title = {Towards Efficient Feedback Control in Streaming Computer Vision Pipelines},
  booktitle = {Proc. Workshop on User-Centered Computer Vision (co-located with ACCV'14)},
  year = {2014},
  month = {Nov},
  address = {Singapore},
  pages = {16pp},
  keywords = {stream-algebra},
  url_Paper = {pubs/14-uccv-w.pdf}
}
Accelerating Cost Volume Filtering Using Salient Subvolumes and Robust Occlusion Handling.
Helala, M.; and Qureshi, F.
In Proc. 12th Asian Conference on Computer Vision (ACCV'14), pages 16pp, Singapore, Nov 2014.

@InProceedings{14-accv-c,
  author = {M.A. Helala and F.Z. Qureshi},
  title = {Accelerating Cost Volume Filtering Using Salient Subvolumes and Robust Occlusion Handling},
  booktitle = {Proc. 12th Asian Conference on Computer Vision (ACCV'14)},
  year = {2014},
  month = {Nov},
  address = {Singapore},
  pages = {16pp},
  keywords = {stereo-cost-volume},
  url_Paper = {pubs/14-accv-c.pdf}
}
A Negotiation Protocol with Conditional Offers for Camera Handoffs.
Starzyk, W.; and Qureshi, F.
In Proc. 8th ACM/IEEE International Conference on Distributed Smart Cameras, pages 7pp, Venice, November 2014.

@InProceedings{14-icdsc-c,
  author = {W. Starzyk and F.Z. Qureshi},
  title = {A Negotiation Protocol with Conditional Offers for Camera Handoffs},
  booktitle = {Proc. 8th ACM/IEEE International Conference on Distributed Smart Cameras},
  year = {2014},
  month = {November},
  address = {Venice},
  pages = {7pp},
  url_Paper = {pubs/14-icdsc-c.pdf}
}
A Stream Algebra for Computer Vision Pipelines.
Helala, M.; Pu, K.; and Qureshi, F.
In Proc. Second Workshop on Web-scale Vision and Social Media (VSM), co-located with CVPR, pages 8pp, Columbus, June 2014.

@InProceedings{14-vsm-w,
  author = {M.A. Helala and K.Q. Pu and F.Z. Qureshi},
  title = {A Stream Algebra for Computer Vision Pipelines},
  booktitle = {Proc. Second Workshop on Web-scale Vision and Social Media (VSM), co-located with CVPR},
  year = {2014},
  month = {June},
  address = {Columbus},
  pages = {8pp},
  keywords = {stream-algebra},
  url_Paper = {pubs/14-vsm-w.pdf}
}
Integrating Consumer Smart Cameras into Camera Networks: Opportunities and Obstacles.
Prati, A.; and Qureshi, F.
IEEE Computer (Special Issue on "Smart Camera Networks"), 47(5): 45–51. May 2014.

@Article{14-computer-j,
  author = {A. Prati and F.Z. Qureshi},
  title = {Integrating Consumer Smart Cameras into Camera Networks: Opportunities and Obstacles},
  journal = {IEEE Computer (Special Issue on "Smart Camera Networks")},
  year = {2014},
  volume = {47},
  number = {5},
  pages = {45--51},
  month = {May},
  doi = {10.1109/MC.2014.125},
  issn = {0018-9162},
  url_Link = {http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6818916},
  url_Paper = {pubs/14-ieee-computers-j.pdf}
}
Virtual Vision for Camera Networks Research.
Qureshi, F.; and Terzopoulos, D.
Volume 4. Academic Press Library in Signal Processing: Image, Video Processing and Analysis, Hardware, Audio, Acoustics and Speech Processing, pages 609–625. Chellappa, R.; and Theodoridis, S., editor(s). Elsevier, January 2014.

@InBook{14-ch,
  author = {F.Z. Qureshi and D. Terzopoulos},
  editor = {R. Chellappa and S. Theodoridis},
  title = {Academic Press Library in Signal Processing: Image, Video Processing and Analysis, Hardware, Audio, Acoustics and Speech Processing},
  chapter = {Virtual Vision for Camera Networks Research},
  publisher = {Elsevier},
  year = {2014},
  volume = {4},
  number = {21},
  month = {January},
  pages = {609--625}
}
2013 (5)
Topic Models for Image Localization.
Wang, Z.; and Qureshi, F.
In Proc. Tenth Conference on Computer and Robot Vision (CRV 13), pages 6pp, Regina, May 2013.

@InProceedings{13-crv-c,
  author = {Z. Wang and F.Z. Qureshi},
  title = {Topic Models for Image Localization},
  booktitle = {Proc. Tenth Conference on Computer and Robot Vision (CRV 13)},
  year = {2013},
  month = {May},
  address = {Regina},
  pages = {6pp},
  url_Paper = {pubs/13-crv-c.pdf}
}
I Remember Seeing This Video: Image Driven Search in Video Collections.
Wang, Z.; and Qureshi, F.
In Proc. Tenth Conference on Computer and Robot Vision (CRV 13), pages 6pp, Regina, May 2013.

@InProceedings{13-crv-2-c,
  author = {Z. Wang and F.Z. Qureshi},
  title = {I Remember Seeing This Video: Image Driven Search in Video Collections},
  booktitle = {Proc. Tenth Conference on Computer and Robot Vision (CRV 13)},
  year = {2013},
  month = {May},
  address = {Regina},
  pages = {6pp},
  url_Paper = {pubs/13-crv-2-c.pdf}
}
Droplet Tracking from Unsynchronized Cameras.
Zarrabeitia, L.; Aruliah, D.; and Qureshi, F.
In Proc. Second International Conference on Pattern Recognition Applications and Methods (ICPRAM), pages 8pp, Barcelona, February 2013.

@InProceedings{13-icpram-c,
  author = {L.A. Zarrabeitia and D.A. Aruliah and F.Z. Qureshi},
  title = {Droplet Tracking from Unsynchronized Cameras},
  booktitle = {Proc. Second International Conference on Pattern Recognition Applications and Methods (ICPRAM)},
  year = {2013},
  month = {February},
  address = {Barcelona},
  pages = {8pp},
  url_Paper = {pubs/13-icpram-c.pdf}
}
Software Laboratory for Camera Networks Research.
Starzyk, W.; and Qureshi, F.
IEEE Journal on Emerging and Selected Topics in Circuits and Systems (Special Issue on "Computational and Smart Cameras"), 3(2): 284–293. June 2013.

@Article{13-jetcas-j,
  author = {W. Starzyk and F.Z. Qureshi},
  title = {Software Laboratory for Camera Networks Research},
  journal = {IEEE Journal on Emerging and Selected Topics in Circuits and Systems (Special Issue on "Computational and Smart Cameras")},
  year = {2013},
  volume = {3},
  number = {2},
  pages = {284--293},
  month = {June},
  url_Paper = {pubs/13-jetcas-j-official.pdf}
}
Object Video Streams: A Framework for Preserving Privacy in Video Surveillance.
Qureshi, F.
Intelligent Multimedia Surveillance: Current Trends and Approach, pages 67–82. Atrey, P.; Kankanhalli, M.; and Cavallaro, A., editor(s). Springer, New York, August 2013.

@InBook{13-object-video-streams-ch,
  author = {F.Z. Qureshi},
  editor = {P. Atrey and M.S. Kankanhalli and A. Cavallaro},
  title = {Intelligent Multimedia Surveillance: Current Trends and Approach},
  chapter = {Object Video Streams: A Framework for Preserving Privacy in Video Surveillance},
  publisher = {Springer},
  year = {2013},
  number = {4},
  address = {New York},
  month = {August},
  pages = {67--82},
  keywords = {video-privacy},
  url_Paper = {pubs/13-object-video-streams-ch.pdf}
}
2012 (4)
Mosaic of Near Ground UAV Videos Under Parallax Effects.
Helala, M.; Zarrabeitia, L.; and Qureshi, F.
In Proc. Sixth ACM/IEEE International Conference on Distributed Smart Cameras (ICDSC12), pages 6pp, Hong Kong, October 2012.

@InProceedings{12-icdsc-c,
  author = {M.A. Helala and L.A. Zarrabeitia and F.Z. Qureshi},
  title = {Mosaic of Near Ground UAV Videos Under Parallax Effects},
  booktitle = {Proc. Sixth ACM/IEEE International Conference on Distributed Smart Cameras (ICDSC12)},
  year = {2012},
  month = {October},
  address = {Hong Kong},
  pages = {6pp},
  url_Paper = {pubs/12-icdsc-c.pdf},
  keywords = {aerial-mosaics}
}
Road Boundary Detection in Challenging Scenarios.
Helala, M.; Pu, K.; and Qureshi, F.
In 2012 9th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS), pages 428–433, Beijing, September 2012. IEEE.

@InProceedings{12-avss-c,
  author = {M.A. Helala and K.Q. Pu and F.Z. Qureshi},
  title = {Road Boundary Detection in Challenging Scenarios},
  booktitle = {2012 9th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS)},
  year = {2012},
  month = {September},
  address = {Beijing},
  publisher = {IEEE},
  pages = {428--433},
  doi = {10.1109/AVSS.2012.61},
  isbn = {978-1-4673-2499-1},
  url_Link = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6328052},
  url_Paper = {pubs/12-avss-c.pdf},
  keywords = {road-boundary}
}
A Virtual Vision Simulator for Camera Networks Research.
Starzyk, W.; Domurad, A.; and Qureshi, F.
In 2012 Canadian Conference on Computer and Robot Vision (CRV), pages 306–313, Toronto, May 2012. IEEE.

@InProceedings{12-crv-c,
  author = {W. Starzyk and A. Domurad and F.Z. Qureshi},
  title = {A Virtual Vision Simulator for Camera Networks Research},
  booktitle = {2012 Canadian Conference on Computer and Robot Vision (CRV)},
  year = {2012},
  month = {May},
  address = {Toronto},
  publisher = {IEEE},
  pages = {306--313},
  doi = {10.1109/CRV.2012.47},
  isbn = {978-1-4673-1271-4},
  url_Link = {http://ieeexplore.ieee.org/lpdocs/epic03/wrapper.htm?arnumber=6233156},
  url_Paper = {pubs/12-crv-c.pdf}
}
Extraction of Blood Droplet Flight Trajectories from Videos for Forensic Analysis.
Zarrabeitia, L.; Aruliah, D.; and Qureshi, F.
In 1st International Conference on Pattern Recognition Applications and Methods (ICPRAM 12), pages 142–153, Vilamoura, February 2012.

@InProceedings{12-icpram-c,
  author = {L.A. Zarrabeitia and D.A. Aruliah and F.Z. Qureshi},
  title = {Extraction of Blood Droplet Flight Trajectories from Videos for Forensic Analysis},
  booktitle = {1st International Conference on Pattern Recognition Applications and Methods (ICPRAM 12)},
  year = {2012},
  month = {February},
  address = {Vilamoura},
  pages = {142--153},
  url_Paper = {pubs/12-icpram-c.pdf}
}
2011 (5)
Learning proactive control strategies for PTZ cameras. Qureshi, F.; and Starzyk, W. In Proc. Fifth ACM/IEEE International Conference on Distributed Smart Cameras (ICDSC), pages 1–6, Ghent, August 2011. IEEE.

@inproceedings{11-icdsc-c,
  author = {F.Z. Qureshi and W. Starzyk},
  title = {{Learning proactive control strategies for PTZ cameras}},
  booktitle = {Proc. Fifth ACM/IEEE International Conference on Distributed Smart Cameras (ICDSC)},
  year = {2011},
  pages = {1--6},
  publisher = {IEEE},
  address = {Ghent},
  month = aug,
  url = {http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=6042928},
  url_Paper = {pubs/11-icdsc-c.pdf}
}
Multi-tasking Smart Cameras for Intelligent Video Surveillance Systems. Starzyk, W.; and Qureshi, F. In Proc. 8th IEEE International Conference on Advanced Video and Signal-Based Surveillance (AVSS 11), pages 6pp, Klagenfurt, August 2011.

@inproceedings{11-avss-c,
  author = {W. Starzyk and F.Z. Qureshi},
  title = {{Multi-tasking Smart Cameras for Intelligent Video Surveillance Systems}},
  booktitle = {Proc. 8th IEEE International Conference on Advanced Video and Signal-Based Surveillance (AVSS 11)},
  year = {2011},
  pages = {6pp},
  address = {Klagenfurt},
  month = aug,
  url_Paper = {pubs/11-avss-c.pdf}
}
Negotiating Privacy Preferences in Video Surveillance Systems. Barhm, M.; Qwasmi, N.; Qureshi, F.; and El-Khatib, K. In Proc. 24th International Conference on Industrial Engineering and Other Applications of Applied Intelligent Systems (IEA-AIE 2011), pages 511–521, Syracuse, NY, 2011. Springer-Verlag Berlin/Heidelberg.

@inproceedings{BQQ11,
  author = {M.S. Barhm and N. Qwasmi and F.Z. Qureshi and K. El-Khatib},
  title = {{Negotiating Privacy Preferences in Video Surveillance Systems}},
  booktitle = {Proc. 24th International Conference on Industrial Engineering and Other Applications of Applied Intelligent Systems (IEA-AIE 2011)},
  year = {2011},
  pages = {511--521},
  publisher = {Springer-Verlag Berlin/Heidelberg},
  address = {Syracuse, NY}
}
Virtual Vision: Virtual Reality Subserving Computer Vision Research for Camera Sensor Networks. Qureshi, F.; and Terzopoulos, D. In Bhanu, B.; Ravishankar, C.; Roy-Chowdhury, A.; Aghajan, H.; and Terzopoulos, D., editors, Distributed Video Sensor Networks, pages 273–288. Springer, New York, 2011.

@InBook{10-dvsn-virtual_vision-ch,
  author = {F.Z. Qureshi and D. Terzopoulos},
  editor = {B. Bhanu and C.V. Ravishankar and A.K. Roy-Chowdhury and H. Aghajan and D. Terzopoulos},
  title = {Distributed Video Sensor Networks},
  chapter = {Virtual Vision: Virtual Reality Subserving Computer Vision Research for Camera Sensor Networks},
  publisher = {Springer},
  year = {2011},
  address = {New York},
  pages = {273--288},
  url_Paper = {pubs/dvsn-virtual_vision-ch.pdf}
}
Proactive PTZ Control: A Cognitive Sensor Network That Plans Ahead. Terzopoulos, D.; and Qureshi, F. In Bhanu, B.; Ravishankar, C.; Roy-Chowdhury, A.; Aghajan, H.; and Terzopoulos, D., editors, Distributed Video Sensor Networks, pages 163–178. Springer, New York, 2011.

@InBook{10-dvsn-ptz_control-ch,
  author = {D. Terzopoulos and F.Z. Qureshi},
  editor = {B. Bhanu and C.V. Ravishankar and A.K. Roy-Chowdhury and H. Aghajan and D. Terzopoulos},
  title = {Distributed Video Sensor Networks},
  chapter = {Proactive PTZ Control: A Cognitive Sensor Network That Plans Ahead},
  publisher = {Springer},
  year = {2011},
  address = {New York},
  pages = {163--178},
  url_Paper = {pubs/dvsn-ptz_control-ch.pdf}
}
2010 (2)
Collaborative Sensing via Local Negotiations in Ad Hoc Networks of Smart Cameras. Qureshi, F. In Proc. 4th ACM/IEEE International Conference on Distributed Smart Cameras (ICDSC10), pages 8pp, Atlanta, September 2010.

@inproceedings{10-icdsc-c,
  author = {F.Z. Qureshi},
  title = {{Collaborative Sensing via Local Negotiations in Ad Hoc Networks of Smart Cameras}},
  booktitle = {Proc. 4th ACM/IEEE International Conference on Distributed Smart Cameras (ICDSC10)},
  year = {2010},
  pages = {8pp},
  address = {Atlanta},
  month = sep,
  url_Paper = {pubs/10-icdsc-c.pdf}
}
On the Role of Negotiations in Ad Hoc Networks of Smart Cameras. Qureshi, F. In IEEE International Conference on Distributed Computing in Sensor Systems (DCOSS 10), pages 1–2, Santa Barbara, June 2010.

@inproceedings{10-dcoss-p,
  author = {F.Z. Qureshi},
  title = {{On the Role of Negotiations in Ad Hoc Networks of Smart Cameras}},
  booktitle = {IEEE International Conference on Distributed Computing in Sensor Systems (DCOSS 10)},
  year = {2010},
  pages = {1--2},
  address = {Santa Barbara},
  month = jun,
  url_Paper = {pubs/10-dcoss-p.pdf}
}
2009 (2)
Object-Video Streams for Preserving Privacy in Video Surveillance. Qureshi, F. In Proc. 6th International Conference on Advanced Video and Signal Based Surveillance (AVSS 09), pages 8pp, Genova, Italy, September 2009.

@inproceedings{09-avss-c,
  author = {F.Z. Qureshi},
  title = {{Object-Video Streams for Preserving Privacy in Video Surveillance}},
  booktitle = {Proc. 6th International Conference on Advanced Video and Signal Based Surveillance (AVSS 09)},
  year = {2009},
  pages = {8pp},
  address = {Genova, Italy},
  month = sep,
  url_Paper = {pubs/09-avss-c.pdf},
  keywords = {video-privacy}
}
Planning Ahead for PTZ Camera Assignment and Control. Qureshi, F.; and Terzopoulos, D. In Proc. Third ACM/IEEE International Conference on Distributed Smart Cameras (ICDSC 09), pages 1–8, Como, Italy, August 2009.

@inproceedings{09-icdsc-c,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {{Planning Ahead for PTZ Camera Assignment and Control}},
  booktitle = {Proc. Third ACM/IEEE International Conference on Distributed Smart Cameras (ICDSC 09)},
  year = {2009},
  pages = {1--8},
  address = {Como, Italy},
  month = aug,
  url_Paper = {pubs/10-icdsc-c.pdf}
}
2008 (4)
Multi-camera Control through Constraint Satisfaction for Persistent Surveillance. Qureshi, F.; and Terzopoulos, D. In 2008 IEEE Fifth International Conference on Advanced Video and Signal Based Surveillance (AVSS), pages 211–218, Santa Fe, September 2008. IEEE.

@inproceedings{08-avss-c,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {{Multi-camera Control through Constraint Satisfaction for Persistent Surveillance}},
  booktitle = {2008 IEEE Fifth International Conference on Advanced Video and Signal Based Surveillance (AVSS)},
  year = {2008},
  pages = {211--218},
  publisher = {IEEE},
  address = {Santa Fe},
  month = sep,
  doi = {10.1109/AVSS.2008.37},
  isbn = {978-0-7695-3341-4},
  url_Paper = {pubs/08-avss-c.pdf}
}
A Simulation Framework for Camera Sensor Networks Research. Qureshi, F.; and Terzopoulos, D. In Proc. 11th Communications and Networking Simulation Symposium (CNS 2008), pages 41–48, Ottawa, April 2008.

@inproceedings{08-cns-c,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {{A Simulation Framework for Camera Sensor Networks Research}},
  booktitle = {Proc. 11th Communications and Networking Simulation Symposium (CNS 2008)},
  year = {2008},
  pages = {41--48},
  address = {Ottawa},
  month = apr,
  annote = {Electronic proceedings},
  url_Paper = {pubs/08-cns-c.pdf}
}
Smart Camera Networks in Virtual Reality. Qureshi, F.; and Terzopoulos, D. Proceedings of the IEEE (Special Issue on "Smart Cameras"), 96(10): 1640–1656. October 2008.

@Article{08-pieee-j,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {Smart Camera Networks in Virtual Reality},
  journal = {Proceedings of the IEEE (Special Issue on ``Smart Cameras'')},
  year = {2008},
  volume = {96},
  number = {10},
  pages = {1640--1656},
  month = {October},
  url_Paper = {pubs/08-pieee-j-ieee.pdf}
}
Intelligent Perception and Control for Space Robotics: Autonomous Satellite Rendezvous and Docking. Qureshi, F.; and Terzopoulos, D. Machine Vision and Applications, 19(3): 141–161. February 2008.

@Article{08-mva-j,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {Intelligent Perception and Control for Space Robotics: Autonomous Satellite Rendezvous and Docking},
  journal = {Machine Vision and Applications},
  year = {2008},
  volume = {19},
  number = {3},
  pages = {141--161},
  month = {February},
  url_Paper = {pubs/08-mva-j.pdf},
  keywords = {coco}
}
2007 (5)
Virtual vision. Qureshi, F.; and Terzopoulos, D. In Proc. 2007 ACM Symposium on Virtual Reality Software and Technology (VRST 07), pages 247–248, Newport Beach, November 2007. ACM Press.

@inproceedings{Qureshi:2007fl,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {{Virtual vision}},
  booktitle = {Proc. 2007 ACM Symposium on Virtual Reality Software and Technology (VRST 07)},
  year = {2007},
  pages = {247--248},
  publisher = {ACM Press},
  address = {Newport Beach},
  month = nov,
  doi = {10.1145/1315184.1315243},
  isbn = {9781595938633},
  url_Paper = {pubs/07-vrst-c.pdf}
}
Smart Camera Networks in Virtual Reality. Qureshi, F.; and Terzopoulos, D. In 2007 First ACM/IEEE International Conference on Distributed Smart Cameras, pages 87–94, Vienna, September 2007. IEEE.

@inproceedings{07-icdsc-c,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {{Smart Camera Networks in Virtual Reality}},
  booktitle = {2007 First ACM/IEEE International Conference on Distributed Smart Cameras},
  year = {2007},
  pages = {87--94},
  publisher = {IEEE},
  address = {Vienna},
  month = sep,
  doi = {10.1109/ICDSC.2007.4357510},
  isbn = {978-1-4244-1353-9},
  url_Paper = {pubs/07-icdsc-c.pdf}
}
Surveillance in Virtual Reality: System Design and Multicamera Control. Qureshi, F.; and Terzopoulos, D. In Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR 07), pages 8pp, Minneapolis, MN, June 2007.

@inproceedings{07-cvpr-c,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {{Surveillance in Virtual Reality: System Design and Multicamera Control}},
  booktitle = {Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR 07)},
  year = {2007},
  pages = {8pp},
  address = {Minneapolis, MN},
  month = jun,
  url_Paper = {pubs/07-cvpr-c.pdf}
}
Distributed Coalition Formation in Visual Sensor Networks: A Virtual Vision Approach. Qureshi, F.; and Terzopoulos, D. In Aspnes, J.; Scheideler, C.; Arora, A.; and Madden, S., editors, Proc. IEEE International Conference on Distributed Computing in Sensor Systems (DCOSS 2007), pages 1–20, Santa Fe, NM, June 2007.

@inproceedings{07-dcoss-c,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {{Distributed Coalition Formation in Visual Sensor Networks: A Virtual Vision Approach}},
  booktitle = {Proc. IEEE International Conference on Distributed Computing in Sensor Systems (DCOSS 2007)},
  year = {2007},
  editor = {Aspnes, J. and Scheideler, C. and Arora, A. and Madden, S.},
  pages = {1--20},
  address = {Santa Fe, NM},
  month = jun,
  url_Paper = {pubs/07-dcoss-c.pdf}
}
Intelligent Perception in Virtual Sensor Networks and Space Robotics. Qureshi, F. Ph.D. Thesis, Department of Computer Science, University of Toronto, January 2007.

@PhdThesis{phd-thesis,
  author = {F.Z. Qureshi},
  title = {Intelligent Perception in Virtual Sensor Networks and Space Robotics},
  school = {Department of Computer Science, University of Toronto},
  year = {2007},
  month = {January}
}
2006 (2)
Virtual Vision and Smart Camera Networks. Qureshi, F.; and Terzopoulos, D. In Working Notes of the International Workshop on Distributed Smart Cameras (DSC 2006) (held in conjunction with the 4th ACM Conference on Embedded Networked Sensor Systems (SenSys 2006)), pages 62–66, Boulder, CO, USA, October 2006.

@InProceedings{06-dsc-w,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {Virtual Vision and Smart Camera Networks},
  booktitle = {Working Notes of the International Workshop on Distributed Smart Cameras (DSC 2006) (Held in conjunction with the 4th ACM Conference on Embedded Networked Sensor Systems (SenSys 2006))},
  year = {2006},
  pages = {62--66},
  month = {October},
  address = {Boulder, CO, USA},
  url_Paper = {pubs/06-dsc-w.pdf}
}
Surveillance Camera Scheduling: A Virtual Vision Approach. Qureshi, F.; and Terzopoulos, D. ACM Multimedia Systems Journal, 12(3): 269–283. December 2006.

@Article{06-mms-j,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {Surveillance Camera Scheduling: A Virtual Vision Approach},
  journal = {ACM Multimedia Systems Journal},
  year = {2006},
  volume = {12},
  number = {3},
  pages = {269--283},
  month = {December},
  url_Paper = {pubs/06-mms-j.pdf}
}
2005 (4)
Surveillance Camera Scheduling: A Virtual Vision Approach. Qureshi, F.; and Terzopoulos, D. In Proc. Third ACM Workshop on Video Surveillance and Sensor Networks (VSSN 05), pages 131–139, Singapore, November 2005.

@InProceedings{05-vssn-w,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {Surveillance Camera Scheduling: A Virtual Vision Approach},
  booktitle = {Proc. Third ACM Workshop on Video Surveillance and Sensor Networks (VSSN 05)},
  year = {2005},
  pages = {131--139},
  month = {November},
  address = {Singapore},
  url_Paper = {pubs/05-vssn-w.pdf}
}
Towards Intelligent Camera Networks: A Virtual Vision Approach. Qureshi, F.; and Terzopoulos, D. In Proc. Second Joint IEEE International Workshop on Visual Surveillance and Performance Evaluation of Tracking and Surveillance (VS-PETS 05), pages 177–184, Beijing, October 2005.

@InProceedings{05-pets-w,
  author = {F.Z. Qureshi and D. Terzopoulos},
  title = {Towards Intelligent Camera Networks: A Virtual Vision Approach},
  booktitle = {Proc. Second Joint IEEE International Workshop on Visual Surveillance and Performance Evaluation of Tracking and Surveillance (VS-PETS 05)},
  year = {2005},
  pages = {177--184},
  month = {October},
  address = {Beijing},
  url_Paper = {pubs/05-pets-w.pdf}
}
Cognitive Vision for Autonomous Satellite Rendezvous and Docking. Qureshi, F.; Terzopoulos, D.; and Jasiobedzki, P. In Proc. Ninth IAPR Conf. on Machine Vision Applications (MVA 2005), pages 314–319, Tsukuba Science City, 2005.

@InProceedings{05-mva-c,
  author = {F.Z. Qureshi and D. Terzopoulos and P. Jasiobedzki},
  title = {Cognitive Vision for Autonomous Satellite Rendezvous and Docking},
  booktitle = {Proc. Ninth IAPR Conf.~on Machine Vision Applications (MVA 2005)},
  year = {2005},
  pages = {314--319},
  address = {Tsukuba Science City},
  url_Paper = {pubs/05-mva-c.pdf},
  keywords = {coco}
}
A Computer Vision System for Space-borne Safety Monitoring. Qureshi, F.; Macrini, D.; Chung, D.; Maclean, J.; Dickinson, S.; and Jasiobedzki, P. In Proc. Eighth International Symposium on Artificial Intelligence, Robotics and Automation in Space (i-SAIRAS 2005), Munich, September 2005.

@InProceedings{05-isairas-c,
  author = {F.Z. Qureshi and D. Macrini and D. Chung and J. Maclean and S. Dickinson and P. Jasiobedzki},
  title = {A Computer Vision System for Space-borne Safety Monitoring},
  booktitle = {Proc. Eighth International Symposium on Artificial Intelligence, Robotics and Automation in Space (i-SAIRAS 2005)},
  year = {2005},
  month = {September},
  address = {Munich},
  url_Paper = {pubs/05-isairas-c.pdf},
  keywords = {coco}
}
2004 (1)
A Cognitive Vision System for Space Robotics. Qureshi, F.; Terzopoulos, D.; and Jasiobedzki, P. In Clabian; Smutny; and Stanke, editors, Applications of Computer Vision Workshop, European Conference on Computer Vision, pages 120–128, Prague, Czech Republic, May 2004. Czech Technical University.

@inproceedings{04-acv-w,
  author = {F.Z. Qureshi and D. Terzopoulos and P. Jasiobedzki},
  title = {{A Cognitive Vision System for Space Robotics}},
  booktitle = {Applications of Computer Vision Workshop, European Conference on Computer Vision},
  year = {2004},
  editor = {Clabian and Smutny and Stanke},
  pages = {120--128},
  publisher = {Czech Technical University},
  address = {Prague, Czech Republic},
  month = may,
  url_Paper = {pubs/04-acv-w.pdf},
  keywords = {coco}
}
2000 (1)
Constructing Anatomically Accurate Face Models using Computed Tomography and Cyberware Data. Qureshi, F. Master's thesis, Department of Computer Science, University of Toronto, January 2000.

@MastersThesis{msc-thesis,
  author = {F.Z. Qureshi},
  title = {Constructing Anatomically Accurate Face Models using Computed Tomography and Cyberware Data},
  school = {Department of Computer Science, University of Toronto},
  year = {2000},
  month = {January}
}
1997 (1)
Development of an Off-line Programming (OLP) System for a Serial Link Robot Manipulator. Qureshi, F.; Asif, M.; Ahmed, M.; and Rauf, A. In IEEE Conference (Pakistan Section), pages 4pp, 1997.

@InProceedings{Qur97,
  author = {F.Z. Qureshi and M. Asif and M. Ahmed and A. Rauf},
  title = {Development of an Off-line Programming (OLP) System for a Serial Link Robot Manipulator},
  booktitle = {IEEE Conference (Pakistan Section)},
  year = {1997},
  pages = {4pp},
  url_Paper = {pubs/97-ieee-c.pdf}
}