A Joint Semantic Vector Representation Model for Text Clustering and Classification.
Momtazi, S; Rahbar, A; Salami, D; and Khanijazani, I
Journal of AI and Data Mining, 7(3): 443–450. 2019.
link
bibtex
@article{momtazi2019joint,
  title     = {A Joint Semantic Vector Representation Model for Text Clustering and Classification},
  author    = {Momtazi, S. and Rahbar, A. and Salami, D. and Khanijazani, I.},
  journal   = {Journal of AI and Data Mining},
  volume    = {7},
  number    = {3},
  pages     = {443--450},
  year      = {2019},
  publisher = {Shahrood University of Technology}
}
High-Resolution Synchronous Digital Ballistocardiography Setup.
Jähne-Raden, N.; Kulau, U.; Gütschleg, H.; Clausen, T.; Jura, T.; Sigg, S.; and Wolf, L.
In
2019 Computing in Cardiology (CinC), 2019. IEEE
link
bibtex
@inproceedings{jahne2019high,
  title        = {High-Resolution Synchronous Digital Ballistocardiography Setup},
  author       = {J{\"a}hne-Raden, Nico and Kulau, Ulf and G{\"u}tschleg, Henrike and Clausen, Thiemo and Jura, Tobias and Sigg, Stephan and Wolf, Lars},
  booktitle    = {2019 Computing in Cardiology (CinC)},
  year         = {2019},
  organization = {IEEE},
  group        = {ambience}
}
AF-DCGAN: Amplitude Feature Deep Convolutional GAN for Fingerprint Construction in Indoor Localization Systems.
Li, Q.; Qu, H.; Liu, Z.; Zhou, N.; Sun, W.; Sigg, S.; and Li, J.
IEEE Transactions on Emerging Topics in Computational Intelligence. 2019.
doi
link
bibtex
abstract
@article{Qiyue_2019_DCGAN,
  author   = {Li, Qiyue and Qu, Heng and Liu, Zhi and Zhou, Nana and Sun, Wei and Sigg, Stephan and Li, Jie},
  journal  = {IEEE Transactions on Emerging Topics in Computational Intelligence},
  title    = {{AF-DCGAN}: Amplitude Feature Deep Convolutional {GAN} for Fingerprint Construction in Indoor Localization Systems},
  year     = {2019},
  abstract = {With widely deployed WiFi network and the uniqueness feature (fingerprint) of wireless channel information, fingerprinting based WiFi positioning is currently the mainstream indoor positioning method, in which fingerprint database construction is crucial. However, for accuracy, this approach requires enough data to be sampled at many reference points, which consumes excessive efforts and time. In this paper, we collect Channel State Information (CSI) data at reference points by the method of device-free localization, then we convert collected CSI data into amplitude feature maps and extend the fingerprint database using the proposed Amplitude-Feature Deep Convolutional Generative Adversarial Network (AF-DCGAN) model. The use of AF-DCGAN accelerates convergence during the training phase, and substantially increases the diversity of the CSI amplitude feature map. The extended fingerprint database both reduces the human effort involved in fingerprint database construction and the accuracy of an indoor localization system, as demonstrated in the experiments.},
  doi      = {10.1109/TETCI.2019.2948058},
  project  = {radiosense},
  group    = {ambience}
}
With widely deployed WiFi network and the uniqueness feature (fingerprint) of wireless channel information, fingerprinting based WiFi positioning is currently the mainstream indoor positioning method, in which fingerprint database construction is crucial. However, for accuracy, this approach requires enough data to be sampled at many reference points, which consumes excessive efforts and time. In this paper, we collect Channel State Information (CSI) data at reference points by the method of device-free localization, then we convert collected CSI data into amplitude feature maps and extend the fingerprint database using the proposed Amplitude-Feature Deep Convolutional Generative Adversarial Network (AF-DCGAN) model. The use of AF-DCGAN accelerates convergence during the training phase, and substantially increases the diversity of the CSI amplitude feature map. The extended fingerprint database both reduces the human effort involved in fingerprint database construction and the accuracy of an indoor localization system, as demonstrated in the experiments.
Hide my Gaze with EOG! Towards Closed-Eye Gaze Gesture Passwords that Resist Observation-Attacks with Electrooculography in Smart Glasses.
Findling, R. D.; Quddus, T.; and Sigg, S.
In
17th International Conference on Advances in Mobile Computing and Multimedia, 2019.
paper
link
bibtex
abstract
6 downloads
@InProceedings{Findling_19_HidemyGaze,
  author    = {Findling, Rainhard Dieter and Quddus, Tahmid and Sigg, Stephan},
  booktitle = {17th International Conference on Advances in Mobile Computing and Multimedia},
  title     = {Hide my Gaze with {EOG}! {T}owards Closed-Eye Gaze Gesture Passwords that Resist Observation-Attacks with Electrooculography in Smart Glasses},
  year      = {2019},
  abstract  = {Smart glasses allow for gaze gesture passwords as a hands-free form of mobile authentication. However, pupil movements for password input are easily observed by attackers, who thereby can derive the password. In this paper we investigate closed-eye gaze gesture passwords with EOG sensors in smart glasses. We propose an approach to detect and recognize closed-eye gaze gestures, together with a 7 and 9 character gaze gesture alphabet. Our evaluation indicates good gaze gesture detection rates. However, recognition is challenging specifically for vertical eye movements with 71.2\%-86.5\% accuracy and better results for opened than closed eyes. We further find that closed-eye gaze gesture passwords are difficult to attack from observations with 0\% success rate in our evaluation, while attacks on open eye passwords succeed with 61\%. This indicates that closed-eye gaze gesture passwords protect the authentication secret significantly better than their open eye counterparts.},
  url_Paper = {http://ambientintelligence.aalto.fi/paper/findling_closed_eye_eog.pdf},
  project   = {hidemygaze},
  group     = {ambience}
}
Smart glasses allow for gaze gesture passwords as a hands-free form of mobile authentication. However, pupil movements for password input are easily observed by attackers, who thereby can derive the password. In this paper we investigate closed-eye gaze gesture passwords with EOG sensors in smart glasses. We propose an approach to detect and recognize closed-eye gaze gestures, together with a 7 and 9 character gaze gesture alphabet. Our evaluation indicates good gaze gesture detection rates. However, recognition is challenging specifically for vertical eye movements with 71.2%-86.5% accuracy and better results for opened than closed eyes. We further find that closed-eye gaze gesture passwords are difficult to attack from observations with 0% success rate in our evaluation, while attacks on open eye passwords succeed with 61%. This indicates that closed-eye gaze gesture passwords protect the authentication secret significantly better than their open eye counterparts.
Tennis Stroke Classification: Comparing Wrist and Racket as IMU Sensor Position.
Ebner, C. J.; and Findling, R. D.
In
17th International Conference on Advances in Mobile Computing and Multimedia, 2019.
paper
link
bibtex
abstract
1 download
@InProceedings{Ebner_19_TennisStrokeClassification,
  author    = {Ebner, Christopher J. and Findling, Rainhard Dieter},
  booktitle = {17th International Conference on Advances in Mobile Computing and Multimedia},
  title     = {Tennis Stroke Classification: Comparing Wrist and Racket as {IMU} Sensor Position},
  year      = {2019},
  abstract  = {Automatic tennis stroke recognition can help tennis players improve their training experience. Previous work has used sensors positions on both wrist and tennis racket, of which different physiological aspects bring different sensing capabilities. However, no comparison of the performance of both positions has been done yet. In this paper we comparatively assess wrist and racket sensor positions for tennis stroke detection and classification. We investigate detection and classification rates with 8 well-known stroke types and visualize their differences in 3D acceleration and angular velocity. Our stroke detection utilizes a peak detection with thresholding and windowing on the derivative of sensed acceleration, while for our stroke recognition we evaluate different feature sets and classification models. Despite the different physiological aspects of wrist and racket as sensor position, for a controlled environment results indicate similar performance in both stroke detection (98.5\%-99.5\%) and user-dependent and independent classification (89\%-99\%).},
  url_Paper = {http://ambientintelligence.aalto.fi/paper/Tennis_Stroke_Recognition.pdf},
  group     = {ambience}
}
Automatic tennis stroke recognition can help tennis players improve their training experience. Previous work has used sensors positions on both wrist and tennis racket, of which different physiological aspects bring different sensing capabilities. However, no comparison of the performance of both positions has been done yet. In this paper we comparatively assess wrist and racket sensor positions for tennis stroke detection and classification. We investigate detection and classification rates with 8 well-known stroke types and visualize their differences in 3D acceleration and angular velocity. Our stroke detection utilizes a peak detection with thresholding and windowing on the derivative of sensed acceleration, while for our stroke recognition we evaluate different feature sets and classification models. Despite the different physiological aspects of wrist and racket as sensor position, for a controlled environment results indicate similar performance in both stroke detection (98.5%-99.5%) and user-dependent and independent classification (89%-99%).
CORMORANT: On Implementing Risk-Aware Multi-Modal Biometric Cross-Device Authentication For Android.
Hintze, D.; Füller, M.; Scholz, S.; Findling, R. D.; Muaaz, M.; Kapfer, P.; Nüssler, W.; and Mayrhofer, R.
In
17th International Conference on Advances in Mobile Computing and Multimedia, 2019.
paper
link
bibtex
abstract
2 downloads
@InProceedings{Hintze_19_CORMORANTImplementingRisk,
  author    = {Hintze, Daniel and F{\"u}ller, Matthias and Scholz, Sebastian and Findling, Rainhard Dieter and Muaaz, Muhammad and Kapfer, Philipp and N{\"u}ssler, Wilhelm and Mayrhofer, Ren{\'e}},
  booktitle = {17th International Conference on Advances in Mobile Computing and Multimedia},
  title     = {{CORMORANT}: On Implementing Risk-Aware Multi-Modal Biometric Cross-Device Authentication For {Android}},
  year      = {2019},
  abstract  = {This paper presents the design and open source implementation of CORMORANT, an Android authentication framework able to increase usability and security of mobile authentication. It uses transparent behavioral and physiological biometrics like gait, face, voice, and keystrokes dynamics to continuously evaluate the user’s identity without explicit interaction. Using signals like location, time of day, and nearby devices to assess the risk of unauthorized access, the required level of confidence in the user’s identity is dynamically adjusted. Authentication results are shared securely, end-to-end encrypted using the Signal messaging protocol, with trusted devices to facilitate cross-device authentication for co-located devices, detected using Bluetooth low energy beacons. CORMORANT is able to reduce the authentication overhead by up to 97\% compared to conventional knowledge-based authentication whilst increasing security at the same time. We share our perspective on some of the successes and shortcomings we encountered implementing and evaluating CORMORANT to hope to inform others working on similar projects.},
  url_Paper = {http://ambientintelligence.aalto.fi/paper/Hintze_19_CORMORANTImplementingRisk_cameraReady.pdf},
  group     = {ambience}
}
This paper presents the design and open source implementation of CORMORANT, an Android authentication framework able to increase usability and security of mobile authentication. It uses transparent behavioral and physiological biometrics like gait, face, voice, and keystrokes dynamics to continuously evaluate the user’s identity without explicit interaction. Using signals like location, time of day, and nearby devices to assess the risk of unauthorized access, the required level of confidence in the user’s identity is dynamically adjusted. Authentication results are shared securely, end-to-end encrypted using the Signal messaging protocol, with trusted devices to facilitate cross-device authentication for co-located devices, detected using Bluetooth low energy beacons. CORMORANT is able to reduce the authentication overhead by up to 97% compared to conventional knowledge-based authentication whilst increasing security at the same time. We share our perspective on some of the successes and shortcomings we encountered implementing and evaluating CORMORANT to hope to inform others working on similar projects.
Predicting the Category of Fire Department Operations.
Pirklbauer, K.; and Findling, R. D.
In
Emerging Research Projects and Show Cases Symposium (SHOW 2019), 2019.
paper
link
bibtex
abstract
1 download
@InProceedings{Pirklbauer_19_PredictingCategoryFire,
  author    = {Pirklbauer, Kevin and Findling, Rainhard Dieter},
  booktitle = {Emerging Research Projects and Show Cases Symposium ({SHOW} 2019)},
  title     = {Predicting the Category of Fire Department Operations},
  year      = {2019},
  abstract  = {Voluntary fire departments have limited human and material resources. Machine learning aided prediction of fire department operation details can benefit their resource planning and distribution. While there is previous work on predicting certain aspects of operations within a given operation category, operation categories themselves have not been predicted yet. In this paper we propose an approach to fire department operation category prediction based on location, time, and weather information, and compare the performance of multiple machine learning models with cross validation. To evaluate our approach, we use two years of fire department data from Upper Austria, featuring 16.827 individual operations, and predict its major three operation categories. Preliminary results indicate a prediction accuracy of 61\%. While this performance is already noticeably better than uninformed prediction (34\% accuracy), we intend to further reduce the prediction error utilizing more sophisticated features and models.},
  url_Paper = {http://ambientintelligence.aalto.fi/paper/momm2019_fire_department_operation_prediction.pdf},
  group     = {ambience}
}
Voluntary fire departments have limited human and material resources. Machine learning aided prediction of fire department operation details can benefit their resource planning and distribution. While there is previous work on predicting certain aspects of operations within a given operation category, operation categories themselves have not been predicted yet. In this paper we propose an approach to fire department operation category prediction based on location, time, and weather information, and compare the performance of multiple machine learning models with cross validation. To evaluate our approach, we use two years of fire department data from Upper Austria, featuring 16.827 individual operations, and predict its major three operation categories. Preliminary results indicate a prediction accuracy of 61%. While this performance is already noticeably better than uninformed prediction (34% accuracy), we intend to further reduce the prediction error utilizing more sophisticated features and models.
Capturing human-machine interaction events from radio sensors in Industry 4.0 environments.
Sigg, S.; Palipana, S.; Savazzi, S.; and Kianoush, S.
In
International Conference on Business Process Management (adjunct), 2019.
link
bibtex
abstract
@InProceedings{Sigg_2019_miel,
  author    = {Sigg, Stephan and Palipana, Sameera and Savazzi, Stefano and Kianoush, Sanaz},
  booktitle = {International Conference on Business Process Management (adjunct)},
  title     = {Capturing human-machine interaction events from radio sensors in Industry 4.0 environments},
  year      = {2019},
  abstract  = {In manufacturing environments, human workers interact with increasingly autonomous machinery.
To ensure workspace safety and production efficiency during human-robot cooperation, continuous and accurate tracking and perception of workers activities is required.
The RadioSense project intends to move forward the state-of-the-art in advanced sensing and perception for next generation manufacturing workspace.
In this paper, we describe our ongoing efforts towards multi-subject recognition cases with multiple persons conducting several simultaneous activities.
Perturbations induced by moving bodies/objects on the electro-magnetic wavefield can be processed for environmental perception.
In particular, we will adopt next generation (5G) high-frequency technologies as well as distributed massive MIMO systems.
},
  project   = {radiosense},
  group     = {ambience}
}
In manufacturing environments, human workers interact with increasingly autonomous machinery. To ensure workspace safety and production efficiency during human-robot cooperation, continuous and accurate tracking and perception of workers activities is required. The RadioSense project intends to move forward the state-of-the-art in advanced sensing and perception for next generation manufacturing workspace. In this paper, we describe our ongoing efforts towards multi-subject recognition cases with multiple persons conducting several simultaneous activities. Perturbations induced by moving bodies/objects on the electro-magnetic wavefield can be processed for environmental perception. In particular, we will adopt next generation (5G) high-frequency technologies as well as distributed massive MIMO systems.
With whom are you talking? Privacy in Speech Interfaces.
Backstrom, T.; Das, S.; Zarazaga, P. P.; Sigg, S.; Findling, R.; and Laakasuo, M.
In
Proceedings of the 4th annual conference of the MyData Global network (MyData 2019), Helsinki, Finland, September 2019.
link
bibtex
abstract
@inproceedings{Backstrom_2019_MyData,
  author    = {Backstrom, Tom and Das, Sneha and Perez Zarazaga, Pablo and Sigg, Stephan and Findling, Rainhard and Laakasuo, Michael},
  title     = {With whom are you talking? Privacy in Speech Interfaces},
  booktitle = {Proceedings of the 4th annual conference of the MyData Global network ({MyData} 2019)},
  address   = {Helsinki, Finland},
  month     = sep,
  year      = {2019},
  abstract  = {Speech is about interaction. It is more than just passing messages – the listener nods and finishes the sentence for you. Interaction is so essentially a part of normal speech, that non-interactive speech has its own name: it is a monologue. It's not normal. Normal speech is about interaction. Privacy is a very natural part of such spoken interactions. We intuitively lower our voices to a whisper when we want to tell a secret. We thus change the way we speak depending on the level of privacy. In a public speech, we would not reveal intimate secrets. We thus change the content of our speech depending on the level of privacy. Furthermore, in a cafeteria, we would match our speaking volume to the background noise. We therefore change our speech in an interaction with the surroundings. Overall, we change both the manner of speaking and its content, in an interaction with our environment. Our research team is interested in the question of how such notions of privacy should be taken into account in the design of speech interfaces, such as Alexa/Amazon, Siri/Apple, Google and Mycroft. We believe that in the design of good user-interfaces, you should strive for technology which is intuitive to use. If your speech assistant handles privacy in a similar way as a natural person does, then most likely it would feel natural to the user. A key concept for us is modelling the users’ experience of privacy. Technology should understand our feelings towards privacy, how we experience it and act accordingly. From the myData-perspective, this means that all (speech) data is about interactions, between two or more parties. Ownership of such data is then also shared among the participating parties. There is no singular owner of data, but access and management of data must always happen in mutual agreement. In fact, the same applies to many other media as well. It is obvious that chatting on WhatsApp is a shared experience. 
Interesting (=good) photographs are those which entail a story; "This is when we went to the beach with Sophie." The myData concept should be adapted to take into account such frequently appearing real-life data. In our view, data becomes more interesting when it is about an interaction. In other words, since interaction is so central to our understanding of the world, it should then also be reflected in our data representations. To include the most significant data, we should turn our attention from myData to focus on ourData. Here, the importance of data is then dependent on, and even defined by, with whom are you talking?},
  group     = {ambience}
}
Speech is about interaction. It is more than just passing messages – the listener nods and finishes the sentence for you. Interaction is so essentially a part of normal speech, that non-interactive speech has its own name: it is a monologue. It's not normal. Normal speech is about interaction. Privacy is a very natural part of such spoken interactions. We intuitively lower our voices to a whisper when we want to tell a secret. We thus change the way we speak depending on the level of privacy. In a public speech, we would not reveal intimate secrets. We thus change the content of our speech depending on the level of privacy. Furthermore, in a cafeteria, we would match our speaking volume to the background noise. We therefore change our speech in an interaction with the surroundings. Overall, we change both the manner of speaking and its content, in an interaction with our environment. Our research team is interested in the question of how such notions of privacy should be taken into account in the design of speech interfaces, such as Alexa/Amazon, Siri/Apple, Google and Mycroft. We believe that in the design of good user-interfaces, you should strive for technology which is intuitive to use. If your speech assistant handles privacy in a similar way as a natural person does, then most likely it would feel natural to the user. A key concept for us is modelling the users’ experience of privacy. Technology should understand our feelings towards privacy, how we experience it and act accordingly. From the myData-perspective, this means that all (speech) data is about interactions, between two or more parties. Ownership of such data is then also shared among the participating parties. There is no singular owner of data, but access and management of data must always happen in mutual agreement. In fact, the same applies to many other media as well. It is obvious that chatting on WhatsApp is a shared experience. 
Interesting (=good) photographs are those which entail a story; "This is when we went to the beach with Sophie." The myData concept should be adapted to take into account such frequently appearing real-life data. In our view, data becomes more interesting when it is about an interaction. In other words, since interaction is so central to our understanding of the world, it should then also be reflected in our data representations. To include the most significant data, we should turn our attention from myData to focus on ourData. Here, the importance of data is then dependent on, and even defined by, with whom are you talking?
Robust and Responsive Acoustic Pairing of Devices Using Decorrelating Time-Frequency Modelling.
Perez, P.; Backstrom, T.; and Sigg, S.
In
27th European Signal Processing Conference (EUSIPCO), 2019.
paper
link
bibtex
abstract
4 downloads
@InProceedings{Perez_2019_eusipco,
  author    = {Perez, Pablo and Backstrom, Tom and Sigg, Stephan},
  title     = {Robust and Responsive Acoustic Pairing of Devices Using Decorrelating Time-Frequency Modelling},
  booktitle = {27th European Signal Processing Conference ({EUSIPCO})},
  year      = {2019},
  abstract  = {Voice user interfaces have increased in popularity, as they enable natural interaction with different applications using one’s voice. To improve their usability and audio quality, several devices could interact to provide a unified voice user interface. However, with devices cooperating and sharing voice-related information, user privacy may be at risk. Therefore, access management rules that preserve user privacy are important. State-of-the-art methods for acoustic pairing of devices provide fingerprinting based on the time-frequency representation of the acoustic signal and error-correction. We propose to use such acoustic fingerprinting to authorise devices which are acoustically close. We aim to obtain fingerprints of ambient audio adapted to the requirements of voice user interfaces. Our experiments show that the responsiveness and robustness is improved by combining overlapping windows and decorrelating transforms.},
  url_Paper = {http://ambientintelligence.aalto.fi/paper/perezEusipco2019.pdf},
  group     = {ambience}
}
Voice user interfaces have increased in popularity, as they enable natural interaction with different applications using one’s voice. To improve their usability and audio quality, several devices could interact to provide a unified voice user interface. However, with devices cooperating and sharing voice-related information, user privacy may be at risk. Therefore, access management rules that preserve user privacy are important. State-of-the-art methods for acoustic pairing of devices provide fingerprinting based on the time-frequency representation of the acoustic signal and error-correction. We propose to use such acoustic fingerprinting to authorise devices which are acoustically close. We aim to obtain fingerprints of ambient audio adapted to the requirements of voice user interfaces. Our experiments show that the responsiveness and robustness is improved by combining overlapping windows and decorrelating transforms.
Learning a Classification Model over Vertically-Partitioned Healthcare Data.
Nguyen, L. N.; and Sigg, S.
IEEE Multimedia Communications – Frontiers, SI on Social and Mobile Connected Smart Objects. 2019.
link
bibtex
@article{Le_2019_multimedia,
  author  = {Nguyen, Le Ngu and Sigg, Stephan},
  title   = {Learning a Classification Model over Vertically-Partitioned Healthcare Data},
  journal = {IEEE Multimedia Communications -- Frontiers, SI on Social and Mobile Connected Smart Objects},
  year    = {2019},
  project = {abacus},
  group   = {ambience}
}
Chorus: UWB concurrent transmissions for GPS-like passive localization of countless targets.
Corbalán, P.; Picco, G. P.; and Palipana, S.
In
Proceedings of the 18th International Conference on Information Processing in Sensor Networks, pages 133–144, 2019. ACM
link
bibtex
@inproceedings{corbalan2019chorus,
  title        = {Chorus: {UWB} concurrent transmissions for {GPS}-like passive localization of countless targets},
  author       = {Corbal{\'a}n, Pablo and Picco, Gian Pietro and Palipana, Sameera},
  booktitle    = {Proceedings of the 18th International Conference on Information Processing in Sensor Networks},
  pages        = {133--144},
  year         = {2019},
  organization = {ACM},
  group        = {ambience}
}
On the use of stray wireless signals for sensing: a look beyond 5G for the next generation industry.
Savazzi, S.; Sigg, S.; Vicentini, F.; Kianoush, S.; and Findling, R.
IEEE Computer, SI on Transformative Computing and Communication, 52(7): 25–36. 2019.
doi
link
bibtex
abstract
@article{Savazzi_2019_transformative,
  author   = {Savazzi, Stefano and Sigg, Stephan and Vicentini, Federico and Kianoush, Sanaz and Findling, Rainhard},
  journal  = {IEEE Computer, SI on Transformative Computing and Communication},
  title    = {On the use of stray wireless signals for sensing: a look beyond {5G} for the next generation industry},
  year     = {2019},
  volume   = {52},
  number   = {7},
  pages    = {25--36},
  doi      = {10.1109/MC.2019.2913626},
  abstract = {Transformative techniques to capture and process wireless stray radiation originated from different radio sources are gaining increasing attention. They can be applied to human sensing, behavior recognition, localization and mapping. The omnipresent radio-frequency (RF) stray radiation of wireless devices (WiFi, Cellular or any Personal/Body Area Network) encodes a 3D view of all objects traversed by its propagation. A trained machine learning model is then applied to features extracted in real-time from radio signals to isolate body-induced footprints or environmental alterations. The technology can augment and transform existing radio-devices into ubiquitously distributed sensors that simultaneously act as wireless transmitters and receivers (e.g. fast time-multiplexed). Thereby, 5G-empowered tiny device networks transform into a dense web of RF-imaging links that extract a view of an environment, for instance, to monitor manufacturing processes in next generation industrial set-ups (Industry 4.0, I4.0). This article highlights emerging transformative computing tools for radio sensing, promotes key technology enablers in 5G communication and reports deployment experiences.},
  project  = {radiosense},
  group    = {ambience}
}
Transformative techniques to capture and process wireless stray radiation originated from different radio sources are gaining increasing attention. They can be applied to human sensing, behavior recognition, localization and mapping. The omnipresent radio-frequency (RF) stray radiation of wireless devices (WiFi, Cellular or any Personal/Body Area Network) encodes a 3D view of all objects traversed by its propagation. A trained machine learning model is then applied to features extracted in real-time from radio signals to isolate body-induced footprints or environmental alterations. The technology can augment and transform existing radio-devices into ubiquitously distributed sensors that simultaneously act as wireless transmitters and receivers (e.g. fast time-multiplexed). Thereby, 5G-empowered tiny device networks transform into a dense web of RF-imaging links that extract a view of an environment, for instance, to monitor manufacturing processes in next generation industrial set-ups (Industry 4.0, I4.0). This article highlights emerging transformative computing tools for radio sensing, promotes key technology enablers in 5G communication and reports deployment experiences.
CORMORANT: Ubiquitous Risk-Aware Multi-Modal Biometric Authentication Across Mobile Devices.
Hintze, D.; Füller, M.; Scholz, S.; Findling, R.; Muaaz, M.; Kapfer, P.; Koch, E.; and Mayrhofer, R.
Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT). September 2019.
doi
link
bibtex
abstract
@article{Hintze_2019_Ubicomp,
author = {Daniel Hintze and Matthias F{\"u}ller and Sebastian Scholz and Rainhard Findling and Muhammad Muaaz and Philipp Kapfer and Eckhard Koch and Ren{\'e} Mayrhofer},
journal = {Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies (IMWUT)},
title = {{CORMORANT}: Ubiquitous Risk-Aware Multi-Modal Biometric Authentication Across Mobile Devices},
year = {2019},
month = sep,
abstract = {People own and carry an increasing number of ubiquitous mobile devices, such as smartphones, tablets, and notebooks. Being
small and mobile, those devices have a high propensity to become lost or stolen. Since mobile devices provide access to their
owners’ digital lives, strong authentication is vital to protect sensitive information and services against unauthorized access.
However, at least one in three devices is unprotected, with inconvenience of traditional authentication being the paramount
reason. We present the concept of CORMORANT , an approach to significantly reduce the manual burden of mobile user
verification through risk-aware, multi-modal biometric, cross-device authentication. Transparent behavioral and physiological
biometrics like gait, voice, face, and keystroke dynamics are used to continuously evaluate the user’s identity without explicit
interaction. The required level of confidence in the user’s identity is dynamically adjusted based on the risk of unauthorized
access derived from signals like location, time of day and nearby devices. Authentication results are shared securely with
trusted devices to facilitate cross-device authentication for co-located devices. Conducting a large-scale agent-based simulation
of 4 000 users based on more than 720 000 days of real-world device usage traces and 6.7 million simulated robberies and thefts
sourced from police reports, we found the proposed approach is able to reduce the frequency of password entries required on
smartphones by 97.82% whilst simultaneously reducing the risk of unauthorized access in the event of a crime by 97.72%,
compared to conventional knowledge-based authentication.
},
doi={10.1145/3351243},
internal-note = {Fixed missing " and " between Eckhard Koch and Rene Mayrhofer (was "Kochand"); DOI corrected from 10.1145/2800835.2800906 (the 2015 UbiComp adjunct poster) to the 2019 IMWUT article DOI -- please verify against the ACM record},
group = {ambience}}
People own and carry an increasing number of ubiquitous mobile devices, such as smartphones, tablets, and notebooks. Being small and mobile, those devices have a high propensity to become lost or stolen. Since mobile devices provide access to their owners’ digital lives, strong authentication is vital to protect sensitive information and services against unauthorized access. However, at least one in three devices is unprotected, with inconvenience of traditional authentication being the paramount reason. We present the concept of CORMORANT , an approach to significantly reduce the manual burden of mobile user verification through risk-aware, multi-modal biometric, cross-device authentication. Transparent behavioral and physiological biometrics like gait, voice, face, and keystroke dynamics are used to continuously evaluate the user’s identity without explicit interaction. The required level of confidence in the user’s identity is dynamically adjusted based on the risk of unauthorized access derived from signals like location, time of day and nearby devices. Authentication results are shared securely with trusted devices to facilitate cross-device authentication for co-located devices. Conducting a large-scale agent-based simulation of 4 000 users based on more than 720 000 days of real-world device usage traces and 6.7 million simulated robberies and thefts sourced from police reports, we found the proposed approach is able to reduce the frequency of password entries required on smartphones by 97.82% whilst simultaneously reducing the risk of unauthorized access in the event of a crime by 97.72%, compared to conventional knowledge-based authentication.
Closed-Eye Gaze Gestures: Detection and Recognition of Closed-Eye Movements with Cameras in Smart Glasses.
Findling, R. D.; Nguyen, L. N.; and Sigg, S.
In
International Work-Conference on Artificial Neural Networks, 2019.
paper
doi
link
bibtex
abstract
6 downloads
@inproceedings{Rainhard_2019_iwann,
  author    = {Rainhard Dieter Findling and Le Ngu Nguyen and Stephan Sigg},
  title     = {Closed-Eye Gaze Gestures: Detection and Recognition of Closed-Eye Movements with Cameras in Smart Glasses},
  booktitle = {International Work-Conference on Artificial Neural Networks},
  year      = {2019},
  doi       = {10.1007/978-3-030-20521-8_27},
  url_Paper = {http://ambientintelligence.aalto.fi/findling/pdfs/publications/Findling_19_ClosedEyeGaze.pdf},
  abstract  = {Gaze gestures bear potential for user input with mobile devices, especially smart glasses, due to being always available and hands-free. So far, gaze gesture recognition approaches have utilized open-eye movements only and disregarded closed-eye movements. This paper is a first investigation of the feasibility of detecting and recognizing closed-eye gaze gestures from close-up optical sources, e.g. eye-facing cameras embedded in smart glasses. We propose four different closed-eye gaze gesture protocols, which extend the alphabet of existing open-eye gaze gesture approaches. We further propose a methodology for detecting and extracting the corresponding closed-eye movements with full optical flow, time series processing, and machine learning. In the evaluation of the four protocols we find closed-eye gaze gestures to be detected 82.8%-91.6% of the time, and extracted gestures to be recognized correctly with an accuracy of 92.9%-99.2%.},
  project   = {hidemygaze},
  group     = {ambience}}
Gaze gestures bear potential for user input with mobile devices, especially smart glasses, due to being always available and hands-free. So far, gaze gesture recognition approaches have utilized open-eye movements only and disregarded closed-eye movements. This paper is a first investigation of the feasibility of detecting and recognizing closed-eye gaze gestures from close-up optical sources, e.g. eye-facing cameras embedded in smart glasses. We propose four different closed-eye gaze gesture protocols, which extend the alphabet of existing open-eye gaze gesture approaches. We further propose a methodology for detecting and extracting the corresponding closed-eye movements with full optical flow, time series processing, and machine learning. In the evaluation of the four protocols we find closed-eye gaze gestures to be detected 82.8%-91.6% of the time, and extracted gestures to be recognized correctly with an accuracy of 92.9%-99.2%.
Workout Type Recognition and Repetition Counting with CNNs from 3D Acceleration Sensed on the Chest.
Skawinski, K.; Roca, F. M.; Findling, R. D.; and Sigg, S.
In
International Work-Conference on Artificial Neural Networks, volume 11506, of
LNCS, pages 347–359, June 2019.
paper
doi
link
bibtex
abstract
@inproceedings{Ferran_2019_iwann,
author={Kacper Skawinski and Ferran Montraveta Roca and Rainhard Dieter Findling and Stephan Sigg},
title={Workout Type Recognition and Repetition Counting with {CNNs} from {3D} Acceleration Sensed on the Chest},
booktitle={International Work-Conference on Artificial Neural Networks},
year={2019},
doi = {10.1007/978-3-030-20521-8_29},
volume = {11506},
series = {LNCS},
pages = {347--359},
month = jun,
abstract = {Sports and workout activities have become important parts of modern life. Nowadays, many people track characteristics about their sport activities with their mobile devices, which feature inertial measurement unit (IMU) sensors. In this paper we present a methodology to detect and recognize workout, as well as to count repetitions done in a recognized type of workout, from a single 3D accelerometer worn at the chest. We consider four different types of workout (pushups, situps, squats and jumping jacks). Our technical approach to workout type recognition and repetition counting is based on machine learning with a convolutional neural network. Our evaluation utilizes data of 10 subjects, which wear a Movesense sensors on their chest during their workout. We thereby find that workouts are recognized correctly on average 89.9% of the time, and the workout repetition counting yields an average detection accuracy of 97.9% over all types of workout.},
url_Paper = {http://ambientintelligence.aalto.fi/findling/pdfs/publications/Skawinski_19_WorkoutTypeRecognition.pdf},
group = {ambience}}
Sports and workout activities have become important parts of modern life. Nowadays, many people track characteristics about their sport activities with their mobile devices, which feature inertial measurement unit (IMU) sensors. In this paper we present a methodology to detect and recognize workout, as well as to count repetitions done in a recognized type of workout, from a single 3D accelerometer worn at the chest. We consider four different types of workout (pushups, situps, squats and jumping jacks). Our technical approach to workout type recognition and repetition counting is based on machine learning with a convolutional neural network. Our evaluation utilizes data of 10 subjects, which wear a Movesense sensors on their chest during their workout. We thereby find that workouts are recognized correctly on average 89.9% of the time, and the workout repetition counting yields an average detection accuracy of 97.9% over all types of workout.
Wireless Multi-frequency Feature Set to Simplify Human 3D Pose Estimation.
Raja, M.; Hughes, A.; Xu, Y.; Zarei, P.; Michelson, D. G.; and Sigg, S.
IEEE Antennas and Wireless Propagation letters, 18(5): 876-880. 2019.
doi
link
bibtex
abstract
@article{Raja_2019_antenna,
author={Muneeba Raja and Aidan Hughes and Yixuan Xu and Parham Zarei and David G. Michelson and Stephan Sigg},
journal={IEEE Antennas and Wireless Propagation Letters},
title={Wireless Multi-frequency Feature Set to Simplify Human {3D} Pose Estimation},
year={2019},
volume={18},
number={5},
pages={876--880},
doi = {10.1109/LAWP.2019.2904580},
abstract = {We present a multifrequency feature set to detect driver's three-dimensional (3-D) head and torso movements from fluctuations in the radio frequency channel due to body movements. Current features used for movement detection are based on the time-of-flight, received signal strength, and channel state information and come with the limitations of coarse tracking, sensitivity toward multipath effects, and handling corrupted phase data, respectively. There is no standalone feature set that accurately detects small and large movements and determines the direction in 3-D space. We resolve this problem by using two radio signals at widely separated frequencies in a monostatic configuration. By combining information about displacement, velocity, and direction of movements derived from the Doppler effect at each frequency, we expand the number of existing features. We separate pitch, roll, and yaw movements of head from torso and arm. The extracted feature set is used to train a K-Nearest Neighbor classification algorithm, which could provide behavioral awareness to cars while being less invasive as compared to camera-based systems. The training results on data from four participants reveal that the classification accuracy is 77.4% at 1.8 GHz, it is 87.4% at 30 GHz, and multifrequency feature set improves the accuracy to 92%.},
project = {radiosense},
group = {ambience}}
We present a multifrequency feature set to detect driver's three-dimensional (3-D) head and torso movements from fluctuations in the radio frequency channel due to body movements. Current features used for movement detection are based on the time-of-flight, received signal strength, and channel state information and come with the limitations of coarse tracking, sensitivity toward multipath effects, and handling corrupted phase data, respectively. There is no standalone feature set that accurately detects small and large movements and determines the direction in 3-D space. We resolve this problem by using two radio signals at widely separated frequencies in a monostatic configuration. By combining information about displacement, velocity, and direction of movements derived from the Doppler effect at each frequency, we expand the number of existing features. We separate pitch, roll, and yaw movements of head from torso and arm. The extracted feature set is used to train a K-Nearest Neighbor classification algorithm, which could provide behavioral awareness to cars while being less invasive as compared to camera-based systems. The training results on data from four participants reveal that the classification accuracy is 77.4% at 1.8 GHz, it is 87.4% at 30 GHz, and multifrequency feature set improves the accuracy to 92%.
Learning with Vertically Partitioned Data, Binary Feedback and Random Parameter update.
Nguyen, L. N.; and Sigg, S.
In
Workshop on Hot Topics in Social and Mobile Connected Smart Objects, in conjunction with IEEE International Conference on Computer Communications (INFOCOM), 2019.
link
bibtex
@inproceedings{Le_2019_hotsalsa,
author={Le Ngu Nguyen and Stephan Sigg},
title={Learning with Vertically Partitioned Data, Binary Feedback and Random Parameter update},
booktitle={Workshop on Hot Topics in Social and Mobile Connected Smart Objects, in conjunction with IEEE International Conference on Computer Communications (INFOCOM)},
year={2019},
project = {abacus},
group = {ambience}}
Exploiting usage to predict instantaneous app popularity: Trend filters and retention rates.
Sigg, S.; Peltonen, E.; Lagerspetz, E.; Nurmi, P.; and Tarkoma, S.
ACM Transactions on the WEB. 2019.
doi
link
bibtex
abstract
1 download
@article{Sigg_2019_tweb,
author={Stephan Sigg and Ella Peltonen and Eemil Lagerspetz and Petteri Nurmi and Sasu Tarkoma},
journal={ACM Transactions on the Web},
title={Exploiting usage to predict instantaneous app popularity: Trend filters and retention rates},
year={2019},
abstract = {Popularity of mobile apps is traditionally measured by metrics such as the number of downloads, installations, or user ratings. A problem with these measures is that they reflect usage only indirectly. Indeed, retention rates, i.e., the number of days users continue to interact with an installed app, have been suggested to predict successful app lifecycles. We conduct the first independent and large-scale study of retention rates and usage trends on a dataset of app-usage data from a community of 339,842 users and more than 213,667 apps. Our analysis shows that, on average, applications lose 65% of their users in the first week, while very popular applications (top 100) lose only 35%. It also reveals, however, that many applications have more complex usage behaviour patterns due to seasonality, marketing, or other factors. To capture such effects, we develop a novel app-usage trend measure which provides instantaneous information about the popularity of an application. Analysis of our data using this trend filter shows that roughly 40% of all apps never gain more than a handful of users (Marginal apps). Less than 0.1% of the remaining 60% are constantly popular (Dominant apps), 1% have a quick drain of usage after an initial steep rise (Expired apps), and 6% continuously rise in popularity (Hot apps). From these, we can distinguish, for instance, trendsetters from copycat apps. We conclude by demonstrating that usage behaviour trend information can be used to develop better mobile app recommendations.},
doi = {10.1145/3199677},
group = {ambience}}
Popularity of mobile apps is traditionally measured by metrics such as the number of downloads, installations, or user ratings. A problem with these measures is that they reflect usage only indirectly. Indeed, retention rates, i.e., the number of days users continue to interact with an installed app, have been suggested to predict successful app lifecycles. We conduct the first independent and large-scale study of retention rates and usage trends on a dataset of app-usage data from a community of 339,842 users and more than 213,667 apps. Our analysis shows that, on average, applications lose 65% of their users in the first week, while very popular applications (top 100) lose only 35%. It also reveals, however, that many applications have more complex usage behaviour patterns due to seasonality, marketing, or other factors. To capture such effects, we develop a novel app-usage trend measure which provides instantaneous information about the popularity of an application. Analysis of our data using this trend filter shows that roughly 40% of all apps never gain more than a handful of users (Marginal apps). Less than 0.1% of the remaining 60% are constantly popular (Dominant apps), 1% have a quick drain of usage after an initial steep rise (Expired apps), and 6% continuously rise in popularity (Hot apps). From these, we can distinguish, for instance, trendsetters from copycat apps. We conclude by demonstrating that usage behaviour trend information can be used to develop better mobile app recommendations.
Mobile Brainwaves: On the Interchangeability of Simple Authentication Tasks with Low-Cost, Single-Electrode EEG Devices.
Haukipuro, E.; Kolehmainen, V.; Myllarinen, J.; Remander, S.; Salo, J. T.; Takko, T.; Nguyen, L. N.; Sigg, S.; and Findling, R.
IEICE Transactions, Special issue on Sensing, Wireless Networking, Data Collection, Analysis and Processing Technologies for Ambient Intelligence with Internet of Things. 2019.
paper
doi
link
bibtex
abstract
@article{Haukipuro_2019_IEICE,
author={Eeva-Sofia Haukipuro and Ville Kolehmainen and Janne Myllarinen and Sebastian Remander and Janne T. Salo and Tuomas Takko and Le Ngu Nguyen and Stephan Sigg and Rainhard Findling},
journal={IEICE Transactions, Special issue on Sensing, Wireless Networking, Data Collection, Analysis and Processing Technologies for Ambient Intelligence with Internet of Things},
title={Mobile Brainwaves: On the Interchangeability of Simple Authentication Tasks with Low-Cost, Single-Electrode {EEG} Devices},
year={2019},
url_Paper = {http://ambientintelligence.aalto.fi/findling/pdfs/publications/Haukipuro_19_MobileBrainwavesInterchangeability.pdf},
abstract = {Electroencephalography (EEG) for biometric authentication has received some attention in recent years. In this paper, we explore the effect of three simple EEG related authentication tasks, namely resting, thinking about a picture, and moving a single finger, on mobile, low-cost, single electrode based EEG authentication. We present details of our authentication pipeline, including extracting features from the frequency power spectrum and MFCC, and training a multilayer perceptron classifier for authentication. For our evaluation we record an EEG dataset of 27 test subjects. We use a baseline, task-agnostic, and task-specific evaluation setup to investigate if different tasks can be used in place of each other for authentication. We further evaluate if tasks themselves can be told apart from each other. Evaluation results suggest that tasks differ, hence to some extent are distinguishable, as well as that our authentication approach can work in a task-specific as well as in a task-agnostic manner.},
doi = {10.1587/transcom.2018SEP0016},
group = {ambience}
}
%%% 2018 %%%
Electroencephalography (EEG) for biometric authentication has received some attention in recent years. In this paper, we explore the effect of three simple EEG related authentication tasks, namely resting, thinking about a picture, and moving a single finger, on mobile, low-cost, single electrode based EEG authentication. We present details of our authentication pipeline, including extracting features from the frequency power spectrum and MFCC, and training a multilayer perceptron classifier for authentication. For our evaluation we record an EEG dataset of 27 test subjects. We use a baseline, task-agnostic, and task-specific evaluation setup to investigate if different tasks can be used in place of each other for authentication. We further evaluate if tasks themselves can be told apart from each other. Evaluation results suggest that tasks differ, hence to some extent are distinguishable, as well as that our authentication approach can work in a task-specific as well as in a task-agnostic manner.
Dual target body model for device-free localization applications.
Rampa, V.; Savazzi, S.; D'Amico, M.; and Gentili, G. G.
In
Proc. of Topical Conference on Antennas and Propagation in Wireless Communications (APWC), 2019. IEEE
link
bibtex
abstract
@inproceedings{Rampa_19_apwc,
author = {Rampa, V. and Savazzi, S. and D'Amico, M. and Gentili, G. G.},
booktitle = {Proc. of Topical Conference on Antennas and Propagation in Wireless Communications (APWC)},
title = {Dual target body model for device-free localization applications},
year = {2019},
abstract = {A dual-target model for quantitative evaluation of the influence of two people standing or moving in the surroundings of a radio link is presented here. This physical model, based on the scalar diffraction theory, is able to predict the effects of the attenuation of the electromagnetic (EM) wavefield generated by a RF transmitter caused by the presence of two people standing or moving in the area covered by a radio link. This model allows to relate, for each link, the RSS measurements at the receiver to the position, size and orientation, of two people located in the link area. Unlike complex EM frameworks that cannot be adopted for real- or near real-time applications, the proposed model can be employed for crowd sensing, occupancy estimation and people counting applications for both indoor and outdoor scenarios. In addition, it paves the way to a complete multi-target body model. This novel model toolkit gets over the restrictions of existing simplified multi-body models for DFL applications that exploit linear superposition of single-body effects. The proposed tool is based on the dual knife-edge approach and on a simplified EM body model, but yet effective for DFL applications. Finally, the experimental part shows some preliminary results on the dual-target applications by exploiting RF measurements obtained with WiFi-compliant radio devices working in the 5 GHz band.},
organization={IEEE},
internal-note = {Authors were comma-separated (parsed by BibTeX as a single name) -- rewritten with " and " separators; key renamed from Savazzi_19_icassp because that key is also used by the ICASSP entry below, and this paper appeared at APWC; update any \cite{Savazzi_19_icassp} that meant this entry},
project = {radiosense}
}
A dual-target model for quantitative evaluation of the influence of two people standing or moving in the surroundings of a radio link is presented here. This physical model, based on the scalar diffraction theory, is able to predict the effects of the attenuation of the electromagnetic (EM) wavefield generated by a RF transmitter caused by the presence of two people standing or moving in the area covered by a radio link. This model allows to relate, for each link, the RSS measurements at the receiver to the position, size and orientation, of two people located in the link area. Unlike complex EM frameworks that cannot be adopted for real- or near real-time applications, the proposed model can be employed for crowd sensing, occupancy estimation and people counting applications for both indoor and outdoor scenarios. In addition, it paves the way to a complete multi-target body model. This novel model toolkit gets over the restrictions of existing simplified multi-body models for DFL applications that exploit linear superposition of single-body effects. The proposed tool is based on the dual knife-edge approach and on a simplified EM body model, but yet effective for DFL applications. Finally, the experimental part shows some preliminary results on the dual-target applications by exploiting RF measurements obtained with WiFi-compliant radio devices working in the 5 GHz band.
RadioSense: Wireless Big Data for Collaborative Robotics in Smart Factory.
Savazzi, S.; Rampa, V.; Vicentini, F.; and Nicoli, M. B.
In
Ital-IA Convegno Nazionale CINI sull'Intelligenza Artificiale, pages 1–2, 2019.
link
bibtex
@inproceedings{savazzi2019radiosense,
title={{RadioSense}: Wireless Big Data for Collaborative Robotics in Smart Factory},
author={Savazzi, Stefano and Rampa, Vittorio and Vicentini, Federico and Nicoli, Monica Barbara},
booktitle={Ital-IA Convegno Nazionale CINI sull'Intelligenza Artificiale},
pages={1--2},
project = {radiosense},
year={2019}
}
Passive Detection and Discrimination of Body Movements in the sub-THz Band: A Case Study.
Kianoush, S.; Savazzi, S.; and Rampa, V.
In
Proc. of International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2019. IEEE
link
bibtex
abstract
@inproceedings{Savazzi_19_icassp,
author = {S. Kianoush and S. Savazzi and V. Rampa},
booktitle = {Proc. of International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
title = {Passive Detection and Discrimination of Body Movements in the {sub-THz} Band: A Case Study},
year = {2019},
abstract = {Passive radio sensing technique is a well established research topic where radio-frequency (RF) devices are used as real-time virtual probes that are able to detect the presence and the movement(s) of one or more (non instrumented) subjects. However, radio sensing methods usually employ frequencies in the unlicensed 2.4-5.0 GHz bands where multipath effects strongly limit their accuracy, thus reducing their wide acceptance. On the contrary, sub-terahertz (sub-THz) radiation, due to its very short wavelength and reduced multipath effects, is well suited for high-resolution body occupancy detection and vision applications. In this paper, for the first time, we adopt radio devices emitting in the 100 GHz band to process an image of the environment for body motion discrimination inside a workspace area. Movement detection is based on the real-time analysis of body-induced signatures that are estimated from sub-THz measurements and then processed by specific neural network-based classifiers. Experimental trials are employed to validate the proposed methods and compare their performances with application to industrial safety monitoring.},
organization={IEEE},
project = {radiosense}
}
Passive radio sensing technique is a well established research topic where radio-frequency (RF) devices are used as real-time virtual probes that are able to detect the presence and the movement(s) of one or more (non instrumented) subjects. However, radio sensing methods usually employ frequencies in the unlicensed 2.4-5.0 GHz bands where multipath effects strongly limit their accuracy, thus reducing their wide acceptance. On the contrary, sub-terahertz (sub-THz) radiation, due to its very short wavelength and reduced multipath effects, is well suited for high-resolution body occupancy detection and vision applications. In this paper, for the first time, we adopt radio devices emitting in the 100 GHz band to process an image of the environment for body motion discrimination inside a workspace area. Movement detection is based on the real-time analysis of body-induced signatures that are estimated from sub-THz measurements and then processed by specific neural network-based classifiers. Experimental trials are employed to validate the proposed methods and compare their performances with application to industrial safety monitoring.
Pattern Reconfigurable Antennas for Passive Motion Detection: WiFi Test-Bed and First Studies.
Savazzi, S.; Rampa, V.; Kianoush, S.; and Piazza, D.
In
Proc. of International Symposium on Personal, Indoor and Mobile Radio Communication (PIMRC), 2019. IEEE
link
bibtex
@inproceedings{Savazzi_19_Antennas,
author = {Stefano Savazzi and Vittorio Rampa and Sanaz Kianoush and D. Piazza},
booktitle = {Proc. of International Symposium on Personal, Indoor and Mobile Radio Communication (PIMRC)},
title = {Pattern Reconfigurable Antennas for Passive Motion Detection: {WiFi} Test-Bed and First Studies},
year = {2019},
organization={IEEE},
internal-note = {Removed empty abstract field (triggers a BibTeX empty-field warning); added missing period after the initial in "D Piazza" -- full given name unknown, please complete},
project = {radiosense}
}
People Counting by Dense WiFi MIMO Networks: Channel Features and Machine Learning Algorithms.
Kianoush, S.; Savazzi, S.; Rampa, V.; and Nicoli, M.
MDPI Sensors, 19(16). 2019.
link
bibtex
abstract
@article{Sanaz_2019_mdpi,
author={Sanaz Kianoush and Stefano Savazzi and Vittorio Rampa and Monica Nicoli},
journal={MDPI Sensors},
title={People Counting by Dense {WiFi} {MIMO} Networks: Channel Features and Machine Learning Algorithms},
year={2019},
volume={19},
number={16},
abstract = {Subject counting systems are extensively used in ambient intelligence applications, such as smart home, smart building and smart retail scenarios. In this paper, we investigate the problem of transforming an unmodified WiFi radio infrastructure into a flexible sensing system for passive subject counting. We first introduce the multi-dimensional channel features that capture the subject presence. Then, we compare Bayesian and neural network based machine learning tools specialized for subject discrimination and counting. Ensemble classification is used to leverage space-frequency diversity and combine learning tools trained with different channel features. A combination of multiple models is shown to improve the counting accuracy. System design is based on a dense network of WiFi devices equipped with multiple antennas. Experimental validation is conducted in an indoor space featuring up to five moving people. Real-time computing and practical solutions for cloud migration are also considered. The proposed approach for passive counting gives detection results with 99% average accuracy.},
project = {radiosense}
}
Subject counting systems are extensively used in ambient intelligence applications, such as smart home, smart building and smart retail scenarios. In this paper, we investigate the problem of transforming an unmodified WiFi radio infrastructure into a flexible sensing system for passive subject counting. We first introduce the multi-dimensional channel features that capture the subject presence. Then, we compare Bayesian and neural network based machine learning tools specialized for subject discrimination and counting. Ensemble classification is used to leverage space-frequency diversity and combine learning tools trained with different channel features. A combination of multiple models is shown to improve the counting accuracy. System design is based on a dense network of WiFi devices equipped with multiple antennas. Experimental validation is conducted in an indoor space featuring up to five moving people. Real-time computing and practical solutions for cloud migration are also considered. The proposed approach for passive counting gives detection results with 99% average accuracy.
WiMorse: A Contactless Morse Code Text Input System Using Ambient WiFi Signals.
Niu, K.; Zhang, F.; Jiang, Y.; Xiong, J.; Lv, Q.; Zeng, Y.; and Zhang, D.
IEEE Internet of Things Journal, 6(6): 9993–10008. 2019.
link
bibtex
@article{niu2019wimorse,
title={{WiMorse}: A Contactless {Morse} Code Text Input System Using Ambient {WiFi} Signals},
author={Niu, Kai and Zhang, Fusang and Jiang, Yuhang and Xiong, Jie and Lv, Qin and Zeng, Youwei and Zhang, Daqing},
journal={IEEE Internet of Things Journal},
volume={6},
number={6},
pages={9993--10008},
year={2019},
publisher={IEEE},
project = {radiosense}
}
Physical Model-based Calibration for Device-Free Radio Localization and Motion Tracking.
Rampa, V.; Savazzi, S.; and Kianoush, S.
In
2019 IEEE-APS Topical Conference on Antennas and Propagation in Wireless Communications (APWC), pages 353–358, 2019.
link
bibtex
@inproceedings{rampa2019physical,
  author    = {Rampa, Vittorio and Savazzi, Stefano and Kianoush, Sanaz},
  title     = {Physical Model-based Calibration for Device-Free Radio Localization and Motion Tracking},
  booktitle = {2019 IEEE-APS Topical Conference on Antennas and Propagation in Wireless Communications (APWC)},
  pages     = {353--358},
  year      = {2019},
  project   = {radiosense}
}
Dual-target body model for device-free localization applications.
Rampa, V.; Savazzi, S.; D’Amico, M.; and Gentili, G. G.
In
2019 IEEE-APS Topical Conference on Antennas and Propagation in Wireless Communications (APWC), pages 181–186, 2019.
link
bibtex
@inproceedings{rampa2019dual,
title={Dual-target body model for device-free localization applications},
author={Rampa, Vittorio and Savazzi, Stefano and D'Amico, Michele and Gentili, Gian Guido},
booktitle={2019 IEEE-APS Topical Conference on Antennas and Propagation in Wireless Communications (APWC)},
pages={181--186},
year={2019},
internal-note = {Replaced typographic apostrophe (U+2019) in D'Amico with ASCII apostrophe -- classic BibTeX is not Unicode-safe; NOTE(review): this appears to duplicate the APWC "Dual target body model" entry keyed Savazzi_19_icassp earlier in this file -- consider merging},
project = {radiosense}
}
Passive detection and discrimination of body movements in the sub-THz band: a case study.
Kianoush, S.; Savazzi, S.; and Rampa, V.
In
ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 1597–1601, 2019.
link
bibtex
@inproceedings{kianoush2019passive,
title={Passive detection and discrimination of body movements in the {sub-THz} band: a case study},
author={Kianoush, Sanaz and Savazzi, Stefano and Rampa, Vittorio},
booktitle={ICASSP 2019-2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
pages={1597--1601},
year={2019},
internal-note = {NOTE(review): appears to duplicate the ICASSP entry keyed Savazzi_19_icassp earlier in this file -- consider merging},
project = {radiosense}
}