<!-- Embed the publication list with JavaScript; the jsonp=1 parameter makes the script render the list in place. -->
<script src="https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fnetwork%2Ffiles%2FSXHKYtfTv3ba9BmDx&fileId=SXHKYtfTv3ba9BmDx&msg=preview&jsonp=1"></script>
<?php
// Fetch the rendered publication list server-side and inline it in the page.
$contents = file_get_contents("https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fnetwork%2Ffiles%2FSXHKYtfTv3ba9BmDx&fileId=SXHKYtfTv3ba9BmDx&msg=preview");
echo $contents; // the response is an HTML fragment, so print it as-is
?>
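If your host disables allow_url_fopen, file_get_contents() cannot open URLs; the following is a minimal sketch of the same server-side fetch using PHP's cURL extension (assuming it is installed), not an official BibBase snippet:
<?php
// Fetch the rendered publication list via cURL instead of file_get_contents().
$ch = curl_init("https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fnetwork%2Ffiles%2FSXHKYtfTv3ba9BmDx&fileId=SXHKYtfTv3ba9BmDx&msg=preview");
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true); // return the HTML as a string instead of printing it
$html = curl_exec($ch);
curl_close($ch);
echo $html; // inline the fetched HTML fragment in the page
?>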
<!-- Embed the publication list in an iframe, isolated from the surrounding page's styles and scripts. -->
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fnetwork%2Ffiles%2FSXHKYtfTv3ba9BmDx&fileId=SXHKYtfTv3ba9BmDx&msg=preview"></iframe>
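Without sizing attributes the iframe renders at the browser default (about 300×150 pixels); a sketch with explicit dimensions (the width and height values here are assumptions, adjust them to your layout):
<iframe src="https://bibbase.org/show?bib=https%3A%2F%2Fbibbase.org%2Fnetwork%2Ffiles%2FSXHKYtfTv3ba9BmDx&fileId=SXHKYtfTv3ba9BmDx&msg=preview" width="100%" height="800" style="border:none"></iframe>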
For more details, see the documentation.
@inproceedings{Giusto2024, author = {Edoardo Giusto and Gabriele Iurlaro and Bartolomeo Montrucchio and Alberto Scionti and Olivier Terzo and Chiara Vercellino and Giacomo Vitali and Paolo Viviani}, title = {Harnessing a 256-qubit Neutral Atom Simulator for Graph Classification}, booktitle = {Proceedings of the 2024 IEEE International Conference on Quantum Computing and Engineering (QCE)}, year = {2024}, address = {Montreal, Canada}, note = {In press}, url = {https://hdl.handle.net/11583/2992950} }
@proceedings{asme_turboexpo_24, author = {Biassoni, Daniele and Russo, Matteo and Viviani, Paolo and Vitali, Giacomo and Lengani, Davide}, title = "{A High-Performance Code for Analyzing Loss Transport Equations in High-Fidelity Simulations}", volume = {Volume 12C: Turbomachinery — Design Methods and CFD Modeling for Turbomachinery; Ducts, Noise, and Component Interactions}, series = {Turbo Expo: Power for Land, Sea, and Air}, pages = {V12CT32A039}, year = {2024}, month = {06}, abstract = "{The subject of the present paper is the development of a general procedure to calculate the terms of the total pressure transport equations using a High-Performance Data Analytics (HPDA), based on Proper Orthogonal Decomposition (POD) and leveraging on High Performance Computing. This method is applied to data obtained from high fidelity simulations of low pressure turbine (LPT) blades in order to separate the different loss contributions and to visualize the associated structures to the fluid dynamics phenomena that occur inside the passage. This procedure is developed in Python environment because it easily allows parallel computing. This paper discusses the mathematical framework behind the decomposition of total pressure transport equation and its implementation in Python. The scalability tests are performed for two exemplary datasets and compared to previous implementation showing a considerable speed-up. The procedure applied to Large Eddy Simulation (LES) data shows significant improvement over traditional approaches. It provides more detailed information about the phenomena associated with the generation of losses in the turbine blades, allowing for quick identification of where these losses occur. The HPDA code can be applied to all high fidelity simulations (LES and DNS) in order to get more information from simulations that are extremely expensive, allowing the full exploitation of such large datasets. In addition, to demonstrate the ease of code’s implementation, the data obtained from the POD are compared with data obtained from Fourier decomposition to validate the procedure. The procedure is open access and available in an online repository.}", doi = {10.1115/GT2024-127953}, url = {https://doi.org/10.1115/GT2024-127953}, eprint = {https://asmedigitalcollection.asme.org/GT/proceedings-pdf/GT2024/88070/V12CT32A039/7371105/v12ct32a039-gt2024-127953.pdf}, }
@inproceedings{hpdc_keynote_2024, author = {Viviani, Paolo}, title = {Demistifying HPC-Quantum integration: it's all about scheduling}, year = {2024}, isbn = {9798400706431}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, url = {https://doi.org/10.1145/3659996.3673223}, doi = {10.1145/3659996.3673223}, abstract = {Recent research on the integration between HPC and quantum computer was mostly focused on the software stack and quantum circuit compilation aspects, neglecting critical issues like HPC resource allocation and job scheduling given the scarcity of QPUs, and disregarding the heterogeneity of current quantum technologies and their computational models (e.g., digital vs. analogue). This work would like to bring the attention to issues that are critical to achieve integration with operational HPC environments given the current status of quantum computers maturity and heterogeneity.}, booktitle = {Proceedings of the 2024 Workshop on High Performance and Quantum Computing Integration}, pages = {1–3}, numpages = {3}, keywords = {quantum computing, HPC, parallel programming models, job scheduling, resource allocation}, location = {Pisa, Italy}, series = {HPQCI '24} }
@inproceedings{lubranoAdvancedResourceAllocation2024, title = {Advanced {{Resource Allocation}} in the {{Context}} of {{Heterogeneous Workflows Management}}}, booktitle = {Proceedings of the 2nd {{Workshop}} on {{Workflows}} in {{Distributed Environments}}}, author = {Lubrano, Francesco and Vercellino, Chiara and Vitali, Giacomo and Viviani, Paolo and Scionti, Alberto and Terzo, Olivier}, year = {2024}, month = apr, pages = {14--20}, publisher = {ACM}, address = {Athens Greece}, doi = {10.1145/3642978.3652835}, urldate = {2024-04-23}, isbn = {9798400705465}, abstract = {In High-Performance Computing (HPC), workflows are utilized to define and manage a set of interdependent computations which allow the users to extract insights from (scientific) numerical simulations or data analytics. HPC platforms can perform extreme-scale simulations, combining Artificial Intelligence (AI) training and inference and data analytics (we refer to heterogeneous workflows), by providing tools and computing resources which serve a variety of use-cases spanning very diverse application domains (e.g., weather forecasting, quantum mechanics, etc.). Executing such workflows at scale requires to handle dependencies, job submission automation, I/O mechanisms. Despite State-of-the-Art batch schedulers can be configured and integrated with tools accomplishing this automation, a number of cases where resource allocation can lead to inefficiencies still exist. In this paper, to overcome these limitations, we present the WARP (Workflow-aware Advanced Resource Planner), a tool that integrates with workflow management tools and batch schedulers, to reserve in advance resources for an optimal execution of jobs, based on their duration, dependencies and machine load. WARP has been designed to minimize the overall workflow execution, without violating the priority policies for cluster users imposed by the system administrators.}, langid = {english} }
@inproceedings{vercellinoBBQmISParallelQuantum2023, title = {{{BBQ-mIS}}: {{A Parallel Quantum Algorithm}} for {{Graph Coloring Problems}}}, shorttitle = {{{BBQ-mIS}}}, booktitle = {2023 {{IEEE International Conference}} on {{Quantum Computing}} and {{Engineering}} ({{QCE}})}, author = {Vercellino, Chiara and Vitali, Giacomo and Viviani, Paolo and Giusto, Edoardo and Scionti, Alberto and Scarabosio, Andrea and Terzo, Olivier and Montrucchio, Bartolomeo}, year = {2023}, month = sep, pages = {141--147}, publisher = {{IEEE}}, address = {{Bellevue, WA, USA}}, doi = {10.1109/QCE57702.2023.10198}, urldate = {2023-12-01}, abstract = {Among the limitations of current quantum machines, the qubits count represents one of the most critical challenges for porting reasonably large computational problems, such as those coming from real-world applications, to the scale of the quantum hardware. In this regard, one possibility is to decompose the problems at hand and exploit parallelism over multiple size-limited quantum resources. To this purpose, we designed a hybrid quantum-classical algorithm, i.e., BBQ-mIS, to solve graph coloring problems on Rydberg atoms quantum machines. The BBQ-mIS algorithm combines the natural representation of Maximum Independent Set (MIS) problems onto the machine Hamiltonian with a Branch&Bound (BB) approach to identify a proper graph coloring. In the proposed solution, the graph representation emerges from qubit interactions (qubits represent vertexes of the graph), and the coloring is then retrieved by iteratively assigning one color to a maximal set of independent vertexes of the graph, still minimizing the number of colors with the Branch&Bound approach. We emulated real quantum hardware onto an IBM Power9-based cluster, with 32 cores/node and 256 GB/node, and exploited an MPI-enhanced library to implement the parallelism for the BBQ-mIS algorithm. Considering this use case, we also identify some technical requirements and challenges for an effective HPC-QC integration. The results show that our problem decomposition is effective in terms of graph coloring solutions quality, and provide a reference for applying this methodology to other quantum technologies or applications.}, isbn = {9798350343236} }
@InProceedings{10.1007/978-3-031-43427-3_23, author="Viviani, Paolo and Gesmundo, Ilaria and Ghinato, Elios and Agudelo-Toro, Andres and Vercellino, Chiara and Vitali, Giacomo and Bergamasco, Letizia and Scionti, Alberto and Ghislieri, Marco and Agostini, Valentina and Terzo, Olivier and Scherberger, Hansj{\"o}rg", editor="De Francisci Morales, Gianmarco and Perlich, Claudia and Ruchansky, Natali and Kourtellis, Nicolas and Baralis, Elena and Bonchi, Francesco", title="Deep Learning for Real-Time Neural Decoding of Grasp", booktitle="Machine Learning and Knowledge Discovery in Databases: Applied Data Science and Demo Track", year="2023", publisher="Springer Nature Switzerland", address="Cham", pages="379--393", abstract="Neural decoding involves correlating signals acquired from the brain to variables in the physical world like limb movement or robot control in Brain Machine Interfaces. In this context, this work starts from a specific pre-existing dataset of neural recordings from monkey motor cortex and presents a Deep Learning-based approach to the decoding of neural signals for grasp type classification. Specifically, we propose here an approach that exploits LSTM networks to classify time series containing neural data (i.e., spike trains) into classes representing the object being grasped.", isbn="978-3-031-43427-3", doi = {10.1007/978-3-031-43427-3_23} }
@inproceedings{vercellino_compsac_2023, title = {Neural Optimization for Quantum Architectures: Graph Embedding Problems with {{Distance Encoder Networks}}}, shorttitle = {Neural Optimization for Quantum Architectures}, booktitle = {2023 {{IEEE}} 47th {{Annual Computers}}, {{Software}}, and {{Applications Conference}} ({{COMPSAC}})}, author = {Vercellino, Chiara and Vitali, Giacomo and Viviani, Paolo and Scionti, Alberto and Scarabosio, Andrea and Terzo, Olivier and Giusto, Edoardo and Montrucchio, Bartolomeo}, year = {2023}, month = jun, pages = {380--389}, publisher = {{IEEE}}, address = {{Torino, Italy}}, doi = {10.1109/COMPSAC57700.2023.00058}, url = {https://ieeexplore.ieee.org/document/10196840}, urldate = {2023-08-03}, abstract = {Quantum machines are among the most promising technologies expected to provide significant improvements in the following years. However, bridging the gap between real-world applications and their implementation on quantum hardware is still a complicated task. One of the main challenges is to represent through qubits (i.e., the basic units of quantum information) the problems of interest. According to the specific technology underlying the quantum machine, it is necessary to implement a proper representation strategy, generally referred to as embedding. This paper introduces a neural-enhanced optimization framework to solve the constrained unit disk problem, which arises in the context of qubits positioning for neutral atoms-based quantum hardware. The proposed approach involves a modified autoencoder model, i.e., the Distances Encoder Network, and a custom loss, i.e., the Embedding Loss Function, respectively, to compute Euclidean distances and model the optimization constraints. The core idea behind this design relies on the capability of neural networks to approximate non-linear transformations to make the Distances Encoder Network learn the spatial transformation that maps initial non-feasible solutions of the constrained unit disk problem into feasible ones. The proposed approach outperforms classical solvers, given fixed comparable computation times, and paves the way to address other optimization problems through a similar strategy.}, isbn = {9798350326970} }
@article{vercellino_fgcs_23, title = {A Machine Learning Approach for an HPC Use Case: the Jobs Queuing Time Prediction}, journal = {Future Generation Computer Systems}, volume = {143}, pages = {215-230}, year = {2023}, issn = {0167-739X}, doi = {10.1016/j.future.2023.01.020}, url = {https://www.sciencedirect.com/science/article/pii/S0167739X23000274}, author = {Chiara Vercellino and Alberto Scionti and Giuseppe Varavallo and Paolo Viviani and Giacomo Vitali and Olivier Terzo}, keywords = {High performance computing, Queues, Batch scheduler, Automatism, Machine learning, Uncertainty quantification}, abstract = {High-Performance Computing (HPC) domain provided the necessary tools to support the scientific and industrial advancements we all have seen during the last decades. HPC is a broad domain targeting to provide both software and hardware solutions as well as envisioning methodologies that allow achieving goals of interest, such as system performance and energy efficiency. In this context, supercomputers have been the vehicle for developing and testing the most advanced technologies since their first appearance. Unlike cloud computing resources that are provided to the end-users in an on-demand fashion in the form of virtualized resources (i.e., virtual machines and containers), supercomputers’ resources are generally served through State-of-the-Art batch schedulers (e.g., SLURM, PBS, LSF, HTCondor). As such, the users submit their computational jobs to the system, which manages their execution with the support of queues. In this regard, predicting the behaviour of the jobs in the batch scheduler queues becomes worth it. Indeed, there are many cases where a deeper knowledge of the time experienced by a job in a queue (e.g., the submission of check-pointed jobs or the submission of jobs with execution dependencies) allows exploring more effective workflow orchestration policies. In this work, we focused on applying machine learning (ML) techniques to learn from the historical data collected from the queuing system of real supercomputers, aiming at predicting the time spent on a queue by a given job. Specifically, we applied both unsupervised learning (UL) and supervised learning (SL) techniques to define the most effective features for the prediction task and the actual prediction of the queue waiting time. For this purpose, two approaches have been explored: on one side, the prediction of ranges on jobs’ queuing times (classification approach) and, on the other side, the prediction of the waiting time at the minutes level (regression approach). Experimental results highlight the strong relationship between the SL models’ performances and the way the dataset is split. At the end of the prediction step, we present the uncertainty quantification approach, i.e., a tool to associate the predictions with reliability metrics, based on variance estimation.} }
@article{savioAcceleratingLegacyApplications2022, title = {Accelerating Legacy Applications with Spatial Computing Devices}, author = {Savio, Paolo and Scionti, Alberto and Vitali, Giacomo and Viviani, Paolo and Vercellino, Chiara and Terzo, Olivier and Nguyen, Huy-Nam and Magarielli, Donato and Spano, Ennio and Marconcini, Michele and Poli, Francesco}, year = {2022}, month = nov, journal = {The Journal of Supercomputing}, issn = {0920-8542, 1573-0484}, doi = {10.1007/s11227-022-04925-2}, abstract = {Heterogeneous computing is the major driving factor in designing new energy-efficient high-performance computing systems. Despite the broad adoption of GPUs and other specialized architectures, the interest in spatial architectures like field-programmable gate arrays (FPGAs) has grown. While combining high performance, low power consumption and high adaptability constitute an advantage, these devices still suffer from a weak software ecosystem, which forces application developers to use tools requiring deep knowledge of the underlying system, often leaving legacy code (e.g., Fortran applications) unsupported. By realizing this, we describe a methodology for porting Fortran (legacy) code on modern FPGA architectures, with the target of preserving performance/power ratios. Aimed as an experience report, we considered an industrial computational fluid dynamics application to demonstrate that our methodology produces synthesizable OpenCL codes targeting Intel Arria10 and Stratix10 devices. Although performance gain is not far beyond that of the original CPU code (we obtained a relative speedup of ×~0.59 and ×~0.63, respectively, for a single optimized main kernel, while only on the Stratix10 we achieved ×~2.56 by replicating the main optimized kernel 4 times), our results are quite encouraging to drawn the path for further investigations. This paper also reports some major criticalities in porting Fortran code on FPGA architectures.}, langid = {english} }
@inproceedings{vercellinoNeuralpoweredUnitDisk2022, title = {Neural-Powered Unit Disk Graph Embedding: Qubits Connectivity for Some {{QUBO}} Problems}, shorttitle = {Neural-Powered Unit Disk Graph Embedding}, booktitle = {2022 {{IEEE International Conference}} on {{Quantum Computing}} and {{Engineering}} ({{QCE}})}, author = {Vercellino, Chiara and Viviani, Paolo and Vitali, Giacomo and Scionti, Alberto and Scarabosio, Andrea and Terzo, Olivier and Giusto, Edoardo and Montrucchio, Bartolomeo}, year = {2022}, month = sep, pages = {186--196}, publisher = {{IEEE}}, address = {{Broomfield, CO, USA}}, doi = {10.1109/QCE53715.2022.00038}, isbn = {978-1-66549-113-6}, url = {https://www.computer.org/csdl/proceedings-article/qce/2022/911300a186/}, abstract = {Graph embedding is a recurrent problem in quantum computing, for instance, quantum annealers need to solve a minor graph embedding in order to map a given Quadratic Unconstrained Binary Optimization (QUBO) problem onto their internal connectivity pattern. This work presents a novel approach to constrained unit disk graph embedding, which is encountered when trying to solve combinatorial optimization problems in QUBO form, using quantum hardware based on neutral Rydberg atoms. The qubits, physically represented by the atoms, are excited to the Rydberg state through laser pulses. Whenever qubits pairs are closer together than the blockade radius, entanglement can be reached, thus preventing entangled qubits to be simultaneously in the excited state. Hence, the blockade radius determines the adjacency pattern among qubits, corresponding to a unit disk configuration. Although it is straightforward to compute the adjacency pattern given the qubits’ coordinates, identifying a feasible unit disk arrangement that matches the desired QUBO matrix is, on the other hand, a much harder task. In the context of quantum optimization, this issue translates into the physical placement of the qubits in the 2D/3D register to match the machine’s Ising-like Hamiltonian with the QUBO formulation of the optimization problems. The proposed solution exploits the power of neural networks to transform an initial embedding configuration, which does not match the quantum hardware requirements or does not account for the unit disk property, into a feasible embedding properly representing the target optimization problems. Experimental results show that this new approach overcomes in performance Gurobi solver.} }
@incollection{vivianiTamingMultinodeAccelerated2022, address = {Cham}, title = {Taming {Multi}-node {Accelerated} {Analytics}: {An} {Experience} in {Porting} {MATLAB} to {Scale} with {Python}}, volume = {497}, isbn = {978-3-031-08811-7 978-3-031-08812-4}, shorttitle = {Taming {Multi}-node {Accelerated} {Analytics}}, url = {https://link.springer.com/10.1007/978-3-031-08812-4_20}, language = {en}, urldate = {2022-06-23}, booktitle = {Complex, {Intelligent} and {Software} {Intensive} {Systems}}, publisher = {Springer International Publishing}, author = {Viviani, Paolo and Vitali, Giacomo and Lengani, Davide and Scionti, Alberto and Vercellino, Chiara and Terzo, Olivier}, editor = {Barolli, Leonard}, year = {2022}, doi = {10.1007/978-3-031-08812-4_20}, note = {Series Title: Lecture Notes in Networks and Systems}, pages = {200--210}, }
@incollection{vitaliDynamicJobAllocation2022, address = {Cham}, title = {Dynamic {Job} {Allocation} on {Federated} {Cloud}-{HPC} {Environments}}, volume = {497}, isbn = {978-3-031-08811-7 978-3-031-08812-4}, url = {https://link.springer.com/10.1007/978-3-031-08812-4_8}, language = {en}, urldate = {2022-06-23}, booktitle = {Complex, {Intelligent} and {Software} {Intensive} {Systems}}, publisher = {Springer International Publishing}, author = {Vitali, Giacomo and Scionti, Alberto and Viviani, Paolo and Vercellino, Chiara and Terzo, Olivier}, editor = {Barolli, Leonard}, year = {2022}, doi = {10.1007/978-3-031-08812-4_8}, note = {Series Title: Lecture Notes in Networks and Systems}, pages = {71--82}, }
@incollection{sciontiDistributedHPCResources2022, title = {Distributed {{HPC Resources Orchestration}} for {{Supporting Large-Scale Workflow Execution}}}, shorttitle = {{{HPC}}, {{Big Data}}, and {{AI Convergence Towards Exascale}}}, booktitle = {{{HPC}}, {{Big Data}}, and {{AI Convergence Towards Exascale}}: {{Challenge}} and {{Vision}}}, author = {Scionti, Alberto and Viviani, Paolo and Vitali, Giacomo and Vercellino, Chiara and Terzo, Olivier and Hachinger, Stephan and Vojacek, Luk{\'a}{\v s}}, year = {2022}, month = jan, edition = {First}, pages = {23}, publisher = {{CRC Press}}, address = {{New York}}, doi = {10.1201/9781003176664}, abstract = {Artificial intelligence (AI) is gaining momentum in the scientific and industrial community for the ever-growing number of applications where such innovative techniques of learning form and processing large amount of data have proved successful. High-performance computing (HPC) and cloud resources providers are moving faster to be able to support new applications that benefit from the combination of traditional HPC simulation, machine learning and deep learning processing and big data analytics. However, the tighter the combination of these three elements is, the more complex the integration of innovative architectures into a single execution platform becomes. On one hand, application workflow management systems need to incorporate more functionalities and support dynamism in the execution, by preserving (energy) efficiency of the infrastructural resources. On the other hand, more exotic hardware accelerators (ranging from GPUs and FPGAs, to neural network processors (NNPs), to neuromorphic processors) need to be integrated in the computing assets in order to leverage performance boost. This chapter provides an overview of the future HPC, AI, and big-data cross-stack execution platform, as devised in the funded EuroHPC ACROSS project, which will be tailored to cope with all these challenges, and to support future exascale-ready applications.}, isbn = {978-1-00-317666-4}, langid = {english} }
@incollection{sciontiEnablingHPCArtificial2022, title = {Enabling the {{HPC}} and {{Artificial Intelligence Cross-Stack Convergence}} at the {{Exascale Level}}}, shorttitle = {{{HPC}}, {{Big Data}}, and {{AI Convergence Towards Exascale}}}, booktitle = {{{HPC}}, {{Big Data}}, and {{AI Convergence Towards Exascale}}: {{Challenge}} and {{Vision}}}, author = {Scionti, Alberto and Viviani, Paolo and Vitali, Giacomo and Vercellino, Chiara and Terzo, Olivier}, year = {2022}, month = jan, edition = {First}, pages = {22}, publisher = {{CRC Press}}, address = {{New York}}, doi = {10.1201/9781003176664}, abstract = {Artificial intelligence (AI) is gaining momentum in the scientific and industrial community for the ever-growing number of applications where such innovative techniques of learning form and processing large amount of data have proved successful. High-performance computing (HPC) and cloud resources providers are moving faster to be able to support new applications that benefit from the combination of traditional HPC simulation, machine learning and deep learning processing and big data analytics. However, the tighter the combination of these three elements is, the more complex the integration of innovative architectures into a single execution platform becomes. On one hand, application workflow management systems need to incorporate more functionalities and support dynamism in the execution, by preserving (energy) efficiency of the infrastructural resources. On the other hand, more exotic hardware accelerators (ranging from GPUs and FPGAs, to neural network processors (NNPs), to neuromorphic processors) need to be integrated in the computing assets in order to leverage performance boost. This chapter provides an overview of the future HPC, AI, and big-data cross-stack execution platform, as devised in the funded EuroHPC ACROSS project, which will be tailored to cope with all these challenges, and to support future exascale-ready applications.}, isbn = {978-1-00-317666-4}, langid = {english} }
@inproceedings{21:lexis, author = "Hachinger, Stephan and Martinovič, Jan and Terzo, Olivier and Levrier, Marc and Scionti, Alberto and Magarielli, Donato and Goubier, Thierry and Parodi, Antonio and Harsh, Piyush and Apopei, Florin-Ionut and Munke, Johannes and García-Hernández, Rubén and Golasowski, Martin and Hayek, Mohamad and Donnat, Frédéric and Ganne, Laurent and Koch-Hofer, Cédric and Vitali, Giacomo and Viviani, Paolo and Schorlemmer, Danijel and Danovaro, Emanuele and Parodi, Andrea and Murphy, Seán and Dees, Aaron", title = "{HPC-Cloud-Big Data Convergent Architectures and Research Data Management: The LEXIS Approach}", doi = "10.22323/1.378.0004", booktitle = "Proceedings of International Symposium on Grids & Clouds 2021 {\textemdash} PoS(ISGC2021)", year = 2021, volume = "378", pages = "004", abstract = {The LEXIS project (Large-scale EXecution for Industry & Society, H2020 GA825532) provides a platform for optimised execution of Cloud-HPC workflows, reducing computation time and increasing energy efficiency. The system will rely on advanced, distributed orchestration solutions (Atos YSTIA Suite, with Alien4Cloud and Yorc, based on TOSCA), the High-End Application Execution Middleware HEAppE, and new hardware capabilities for maximising efficiency in data processing, analysis and transfer (e.g. Burst Buffers with GPU- and FPGA-based data reprocessing). LEXIS handles computation tasks and data from three Pilots, based on representative and demanding HPC/Cloud-Computing use cases in Industry (SMEs) and Science: i) Simulations of complex turbomachinery and gearbox systems in Aeronautics, ii) Tsunami simulations and earthquake loss assessments which are time-constrained to enable immediate warnings and to support well-informed decisions, and iii) Weather and Climate simulations where massive amounts of in-situ data are assimilated to improve forecasts. A user-friendly LEXIS web portal, as a unique entry point, will provide access to data as well as workflow-handling and remote visualisation functionality. As part of its back-end, LEXIS builds an elaborate system for the handling of input, intermediate and result data. At its core, a Distributed Data Infrastructure (DDI) ensures the availability of LEXIS data at all participating HPC sites, which will be federated with a common LEXIS Authentication and Authorisation Infrastructure (with unified security model, user database and policies). The DDI leverages best of breed data-management solutions from EUDAT, such as B2SAFE (based on iRODS) and B2HANDLE. REST APIs on top of it will ensure a smooth interaction with LEXIS workflows and the orchestration layer. Last, but not least, the DDI will provide functionalities for Research Data Management following the FAIR principles (“Findable, Interoperable, Accessible, Reusable”), e.g. DOI acquisition, which helps to publish and disseminate open data products.} }
@inproceedings{21:sc:quantum, title = {Towards Optimal Graph Coloring Using Rydberg Atoms}, author = {Vitali, Giacomo and Viviani, Paolo and Vercellino, Chiara and Scarabosio, Andrea and Scionti, Alberto and Terzo, Olivier and Giusto, Edoardo and Montrucchio, Bartolomeo}, year = 2021, booktitle = {The International Conference for High Performance Computing, Networking, Storage, and Analysis, Research posters}, location = {St. Louis, MO, USA}, url = {https://sc21.supercomputing.org/presentation/?id=rpost113&sess=sess278}, abstract = {Quantum mechanics is expected to revolutionize the computing landscape in the near future. Among the many candidate technologies for building universal quantum computers, Rydberg atoms-based systems stand out for being capable of performing both quantum simulations and working as gate-based universal quantum computers while operating at room temperature through an optical system. Moreover, they can potentially scale up to hundreds of quantum bits (qubits). In this work, we solve a Graph Coloring problem by iteratively computing the solutions of Maximal Independent Set (MIS) problems, exploiting the Rydberg blockade phenomenon. Experimental results using a simulation framework on the CINECA Marconi-100 supercomputer demonstrate the validity of the proposed approach.}, keywords = {quantum computing, graph coloring, graph, neutral atoms, quantum simulator, hpc} }
@inproceedings{20:sac:blockchain, title = {Authenticated and Auditable Data Sharing via Smart Contract}, author = {Reniers, Vincent and Gao, Yuan and Zhang, Ren and Viviani, Paolo and Madhusudan, Akash and Lagaisse, Bert and Nikova, Svetla and Van Landuyt, Dimitri and Lombardi, Riccardo and Preneel, Bart and Joosen, Wouter}, year = 2020, booktitle = {Proceedings of the 35th Annual ACM Symposium on Applied Computing}, location = {Brno, Czech Republic}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, series = {SAC '20}, pages = {324–331}, doi = {10.1145/3341105.3373957}, isbn = 9781450368667, url = {https://doi.org/10.1145/3341105.3373957}, abstract = {Our main use case features multiple companies that iteratively optimize on the architectural properties of aircraft components in a decentralized manner. In each optimization step of the so-called multi-disciplinary optimization (MDO) process, sensitive data is exchanged between organizations, and we require auditability and traceability of actions taken to assure compliance with signed legal agreements. In this paper, we present a distributed protocol that coordinates authenticated and auditable exchanges of files, leveraging a smart contract. The entire life cycle of a file exchange, including file registration, access request and key distribution, is recorded and traceable via the smart contract. Moreover, when one party raises a dispute, the smart contract can be used to identify the dishonest party without compromising the file's confidentiality. The proposed protocol provides a simple, novel, yet efficient approach to exchange files with support for data access auditability between companies involved in a private consortium with no incentive to share files outside of the protocol. We implemented the protocol in Solidity, deployed it on a private Ethereum blockchain, and validated it within the use case of a decentralized workflow.}, numpages = 8, keywords = {blockchain storage, data sharing smart contract, distributed shared ledger, auditable data sharing} }