\n \n \n
\n
\n\n \n \n \n \n \n Proceedings of the Fifth Annual Symposium on Combinatorial Search, SOCS 2012.\n \n \n \n\n\n \n Borrajo, D.; Felner, A.; Korf, R.; Likhachev, M.; Linares López, C.; Ruml, W.; and Sturtevant, N.,\n editors.\n \n\n\n \n\n\n\n Niagara Falls, Ontario (Canada), July 2012.\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@proceedings{borrajo.d.felner.a.ea:proceedings,
  title   = {Proceedings of the Fifth Annual Symposium on Combinatorial Search, {SOCS} 2012},
  year    = 2012,
  editor  = {Borrajo, Daniel and Felner, Ariel and Korf, Richard and Likhachev, Maxim and {Linares L{\'o}pez}, Carlos and Ruml, Wheeler and Sturtevant, Nathan},
  address = {Niagara Falls, Ontario (Canada)},
  month   = jul
}
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n The Symposium on Combinatorial Search.\n \n \n \n \n\n\n \n Borrajo, D.; Likhachev, M.; and Linares López, C.\n\n\n \n\n\n\n
AI Communications, 25(3): 209–210. 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{borrajo.d.likhachev.m.ea:symposium,
  author        = {Borrajo, Daniel and Likhachev, Maxim and {Linares L{\'o}pez}, Carlos},
  title         = {The Symposium on Combinatorial Search},
  journal       = {{AI} Communications},
  year          = 2012,
  volume        = 25,
  number        = 3,
  pages         = {209--210},
  doi           = {10.3233/AIC-2012-0530},
  url           = {http://iospress.metapress.com/content/9u44m4134p1u4l76/},
  internal-note = {NOTE(review): metapress.com was shut down, so this URL is likely dead -- verify; the DOI should be preferred}
}
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n A Survey of the Seventh International Planning Competition.\n \n \n \n \n\n\n \n Coles, A. J.; Coles, A.; García Olaya, A.; Jiménez, S.; Linares López, C.; Sanner, S.; and Yoon, S.\n\n\n \n\n\n\n
AI Magazine, 33(1): 83–88. 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{coles.aj.coles.a.ea:survey,
  author  = {Coles, Amanda Jane and Coles, Andrew and {Garc{\'i}a Olaya}, Angel and Jim{\'e}nez, Sergio and {Linares L{\'o}pez}, Carlos and Sanner, Scott and Yoon, Sungwook},
  title   = {A Survey of the Seventh International Planning Competition},
  journal = {{AI} Magazine},
  year    = 2012,
  volume  = 33,
  number  = 1,
  pages   = {83--88},
  url     = {http://www.aaai.org/ojs/index.php/aimagazine/issue/view/197/showToc}
}
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Precomputed-direction Heuristics for Suboptimal Grid-based Path-finding.\n \n \n \n \n\n\n \n Parra, Á.; Torralba, Á.; and Linares López, C.\n\n\n \n\n\n\n In
Proceedings of the Fifth Annual Symposium on Combinatorial Search, SOCS 2012, pages 211–212, Niagara Falls, Ontario (Canada), July 2012. \n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{parra.torralba.ea:precomputed-direction,
  author    = {Parra, {\'A}lvaro and Torralba, {\'A}lvaro and {Linares L{\'o}pez}, Carlos},
  title     = {Precomputed-direction Heuristics for Suboptimal Grid-based Path-finding},
  booktitle = {Proceedings of the Fifth Annual Symposium on Combinatorial Search, {SOCS} 2012},
  pages     = {211--212},
  year      = 2012,
  address   = {Niagara Falls, Ontario (Canada)},
  month     = jul,
  url       = {http://www.aaai.org/Library/SOCS/socs12contents.php}
}
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Calibrating a motion model based on reinforcement learning for pedestrian simulation.\n \n \n \n\n\n \n Martinez-Gil, F.; Lozano, M.; and Fernández, F.\n\n\n \n\n\n\n Volume 7660 LNCS 2012.\n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n\n\n\n
\n
@incollection{Martinez-Gil2012,
  internal-note = {NOTE(review): was @book but carries a booktitle and is an LNCS chapter, so changed to @incollection; appears to duplicate Martinez-Gil2012b (same title, volume, ISBN) -- consider merging},
  abstract  = {In this paper, the calibration of a framework based in Multi-agent Reinforcement Learning (RL) for generating motion simulations of pedestrian groups is presented. The framework sets a group of autonomous embodied agents that learn to control individually its instant velocity vector in scenarios with collisions and friction forces. The result of the process is a different learned motion controller for each agent. The calibration of both, the physical properties involved in the motion of our embodied agents and the corresponding dynamics, is an important issue for a realistic simulation. The physics engine used has been calibrated with values taken from real pedestrian dynamics. Two experiments have been carried out for testing this approach. The results of the experiments are compared with databases of real pedestrians in similar scenarios. As a comparison tool, the diagram of speed versus density, known as fundamental diagram in the literature, is used. {\textcopyright} 2012 Springer-Verlag Berlin Heidelberg.},
  author    = {Martinez-Gil, F. and Lozano, M. and Fern{\'{a}}ndez, F.},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  isbn      = {9783642347092},
  issn      = {03029743},
  keywords  = {Pedestrian motion learning,Reinforcement Learning},
  title     = {Calibrating a motion model based on reinforcement learning for pedestrian simulation},
  volume    = {7660 LNCS},
  year      = {2012}
}
\n
\n\n\n
\n In this paper, the calibration of a framework based in Multi-agent Reinforcement Learning (RL) for generating motion simulations of pedestrian groups is presented. The framework sets a group of autonomous embodied agents that learn to control individually its instant velocity vector in scenarios with collisions and friction forces. The result of the process is a different learned motion controller for each agent. The calibration of both, the physical properties involved in the motion of our embodied agents and the corresponding dynamics, is an important issue for a realistic simulation. The physics engine used has been calibrated with values taken from real pedestrian dynamics. Two experiments have been carried out for testing this approach. The results of the experiments are compared with databases of real pedestrians in similar scenarios. As a comparison tool, the diagram of speed versus density, known as fundamental diagram in the literature, is used. © 2012 Springer-Verlag Berlin Heidelberg.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Reinforcement learning for decision-making in a business simulator.\n \n \n \n\n\n \n García, J.; Borrajo, F.; and Fernández, F.\n\n\n \n\n\n\n 
International Journal of Information Technology and Decision Making, 11(5). 2012.\n
\n\n
\n\n
\n\n
\n\n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{GarcIa2012,
  abstract = {Business simulators are powerful tools for both supporting the decision-making process of business managers as well as for business education. An example is SIMBA (SIMulator for Business Administration), a powerful simulator which is currently used as a web-based platform for business education in different institutions. In this paper, we propose the application of reinforcement learning (RL) for the creation of intelligent agents that can manage virtual companies in SIMBA. This application is not trivial, given the particular intrinsic characteristics of SIMBA: it is a generalized domain where hundreds of parameters modify the domain behavior; it is a multi-agent domain where both cooperation and competition among different agents can coexist; it is required to set dozens of continuous decision variables for a given business decision, which is made only after the study of hundreds of continuous variables. We will demonstrate empirically that all these challenges can be overcome through the use of RL, showing results for different learning scenarios. {\textcopyright} 2012 World Scientific Publishing Company.},
  author   = {Garc{\'{i}}a, J. and Borrajo, F. and Fern{\'{a}}ndez, F.},
  doi      = {10.1142/S0219622012500277},
  issn     = {02196220},
  journal  = {International Journal of Information Technology and Decision Making},
  keywords = {Reinforcement learning,business simulator,competitive learning,multi-agent learning},
  number   = {5},
  title    = {Reinforcement learning for decision-making in a business simulator},
  volume   = {11},
  year     = {2012}
}
\n
\n\n\n
\n Business simulators are powerful tools for both supporting the decision-making process of business managers as well as for business education. An example is SIMBA (SIMulator for Business Administration), a powerful simulator which is currently used as a web-based platform for business education in different institutions. In this paper, we propose the application of reinforcement learning (RL) for the creation of intelligent agents that can manage virtual companies in SIMBA. This application is not trivial, given the particular intrinsic characteristics of SIMBA: it is a generalized domain where hundreds of parameters modify the domain behavior; it is a multi-agent domain where both cooperation and competition among different agents can coexist; it is required to set dozens of continuous decision variables for a given business decision, which is made only after the study of hundreds of continuous variables. We will demonstrate empirically that all these challenges can be overcome through the use of RL, showing results for different learning scenarios. © 2012 World Scientific Publishing Company.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n A review of machine learning for automated planning.\n \n \n \n \n\n\n \n Jiménez, S.; De La Rosa, T.; Fernández, S.; Fernández, F.; and Borrajo, D.\n\n\n \n\n\n\n
The Knowledge Engineering Review, 27(04): 433–467. dec 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{Jimenez2012,
  abstract = {Recent discoveries in automated planning are broadening the scope of planners, from toy problems to real applications. However, applying automated planners to real-world problems is far from simple. On the one hand, the definition of accurate action models for planning is still a bottleneck. On the other hand, off-the-shelf planners fail to scale-up and to provide good solutions in many domains. In these problematic domains, planners can exploit domain-specific control knowledge to improve their performance in terms of both speed and quality of the solutions. However, manual definition of control knowledge is quite difficult. This paper reviews recent techniques in machine learning for the automatic definition of planning knowledge. It has been organized according to the target of the learning process: automatic definition of planning action models and automatic definition of planning control knowledge. In addition, the paper reviews the advances in the related field of reinforcement learning. {\textcopyright} 2012 Cambridge University Press.},
  author   = {Jim{\'{e}}nez, Sergio and {De La Rosa}, Tom{\'{a}}s and Fern{\'{a}}ndez, Susana and Fern{\'{a}}ndez, Fernando and Borrajo, Daniel},
  doi      = {10.1017/S026988891200001X},
  file     = {:home/fernando/papers/tmp/review{\_}of{\_}machine{\_}learning{\_}for{\_}automated{\_}planning.pdf:pdf},
  issn     = {0269-8889},
  journal  = {The Knowledge Engineering Review},
  month    = dec,
  number   = {4},
  pages    = {433--467},
  title    = {A review of machine learning for automated planning},
  url      = {http://www.journals.cambridge.org/abstract{\_}S026988891200001X},
  volume   = {27},
  year     = {2012}
}
\n
\n\n\n
\n Recent discoveries in automated planning are broadening the scope of planners, from toy problems to real applications. However, applying automated planners to real-world problems is far from simple. On the one hand, the definition of accurate action models for planning is still a bottleneck. On the other hand, off-the-shelf planners fail to scale-up and to provide good solutions in many domains. In these problematic domains, planners can exploit domain-specific control knowledge to improve their performance in terms of both speed and quality of the solutions. However, manual definition of control knowledge is quite difficult. This paper reviews recent techniques in machine learning for the automatic definition of planning knowledge. It has been organized according to the target of the learning process: automatic definition of planning action models and automatic definition of planning control knowledge. In addition, the paper reviews the advances in the related field of reinforcement learning. © 2012 Cambridge University Press.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Multi-agent Reinforcement Learning for Simulating Pedestrian Navigation.\n \n \n \n \n\n\n \n Martinez-Gil, F.; Lozano, M.; and Fernández, F.\n\n\n \n\n\n\n In
Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), volume 7113 LNAI, pages 54–69. 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@incollection{Martinez-Gil2012a,
  abstract  = {In this paper we introduce a Multi-agent system that uses Reinforcement Learning (RL) techniques to learn local navigational behaviors to simulate virtual pedestrian groups. The aim of the paper is to study empirically the validity of RL to learn agent-based navigation controllers and their transfer capabilities when they are used in simulation environments with a higher number of agents than in the learned scenario. Two RL algorithms which use Vector Quantization (VQ) as the generalization method for the space state are presented. Both strategies are focused on obtaining a good vector quantizier that generalizes adequately the state space of the agents. We empirically state the convergence of both methods in our navigational Multi-agent learning domain. Besides, we use validation tools of pedestrian models to analyze the simulation results in the context of pedestrian dynamics. The simulations carried out, scaling up the number of agents in our environment (a closed room with a door through which the agents have to leave), have revealed that the basic characteristics of pedestrian movements have been learned. {\textcopyright} 2012 Springer-Verlag Berlin Heidelberg.},
  author    = {Martinez-Gil, Francisco and Lozano, Miguel and Fern{\'{a}}ndez, Fernando},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  doi       = {10.1007/978-3-642-28499-1_4},
  file      = {:home/fernando/papers/tmp/10.1007{\%}2F978-3-642-28499-1{\_}4.pdf:pdf},
  isbn      = {9783642284984},
  issn      = {03029743},
  pages     = {54--69},
  title     = {Multi-agent Reinforcement Learning for Simulating Pedestrian Navigation},
  url       = {http://link.springer.com/10.1007/978-3-642-28499-1{\_}4},
  volume    = {7113 LNAI},
  year      = {2012}
}
\n
\n\n\n
\n In this paper we introduce a Multi-agent system that uses Reinforcement Learning (RL) techniques to learn local navigational behaviors to simulate virtual pedestrian groups. The aim of the paper is to study empirically the validity of RL to learn agent-based navigation controllers and their transfer capabilities when they are used in simulation environments with a higher number of agents than in the learned scenario. Two RL algorithms which use Vector Quantization (VQ) as the generalization method for the space state are presented. Both strategies are focused on obtaining a good vector quantizier that generalizes adequately the state space of the agents. We empirically state the convergence of both methods in our navigational Multi-agent learning domain. Besides, we use validation tools of pedestrian models to analyze the simulation results in the context of pedestrian dynamics. The simulations carried out, scaling up the number of agents in our environment (a closed room with a door through which the agents have to leave), have revealed that the basic characteristics of pedestrian movements have been learned. © 2012 Springer-Verlag Berlin Heidelberg.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n A Meta-tool to Support the Development of Knowledge Engineering Methodologies and Projects.\n \n \n \n \n\n\n \n Flórez, J. E.; Carbó, J.; and Fernández, F.\n\n\n \n\n\n\n 
International Journal of Software Engineering and Knowledge Engineering, 22(08): 1055–1083. dec 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{FLOREZ2012,
  abstract = {Knowledge-based systems (KBSs) or expert systems (ESs) are able to solve problems generally through the application of knowledge representing a domain and a set of inference rules. In knowledge engineering (KE), the use of KBSs in the real world, three principal disadvantages have been encountered. First, the knowledge acquisition process has a very high cost in terms of money and time. Second, processing information provided by experts is often difficult and tedious. Third, the establishment of mark times associated with each project phase is difficult due to the complexity described in the previous two points. In response to these obstacles, many methodologies have been developed, most of which include a tool to support the application of the given methodology. Nevertheless, there are advantages and disadvantages inherent in KE methodologies, as well. For instance, particular phases or components of certain methodologies seem to be better equipped than others to respond to a given problem. However, since KE tools currently available support just one methodology the joint use of these phases or components from different methodologies for the solution of a particular problem is hindered. This paper presents KEManager, a generic meta-tool that facilitates the definition and combined application of phases or components from different methodologies. Although other methodologies could be defined and combined in the KEManager, this paper focuses on the combination of two well-known KE methodologies, CommonKADS and IDEAL, together with the most commonly-applied knowledge acquisition methods. The result is an example of the ad hoc creation of a new methodology from pre-existing methodologies, allowing for the adaptation of the KE process to an organization or domain-specific characteristics. The tool was evaluated by students at Carlos III University of Madrid (Spain). {\textcopyright} 2012 World Scientific Publishing Company.},
  author   = {Fl{\'{o}}rez, Jos{\'{e}} Eloy and Carb{\'{o}}, Javier and Fern{\'{a}}ndez, Fernando},
  doi      = {10.1142/S0218194012500283},
  file     = {:home/fernando/papers/tmp/s0218194012500283.pdf:pdf},
  issn     = {0218-1940},
  journal  = {International Journal of Software Engineering and Knowledge Engineering},
  keywords = {CommonKADS,IDEAL,Knowledge engineering,expert systems,knowledge-based systems,software tools},
  month    = dec,
  number   = {8},
  pages    = {1055--1083},
  title    = {A Meta-tool to Support the Development of Knowledge Engineering Methodologies and Projects},
  url      = {http://www.worldscientific.com/doi/abs/10.1142/S0218194012500283},
  volume   = {22},
  year     = {2012}
}
\n
\n\n\n
\n Knowledge-based systems (KBSs) or expert systems (ESs) are able to solve problems generally through the application of knowledge representing a domain and a set of inference rules. In knowledge engineering (KE), the use of KBSs in the real world, three principal disadvantages have been encountered. First, the knowledge acquisition process has a very high cost in terms of money and time. Second, processing information provided by experts is often difficult and tedious. Third, the establishment of mark times associated with each project phase is difficult due to the complexity described in the previous two points. In response to these obstacles, many methodologies have been developed, most of which include a tool to support the application of the given methodology. Nevertheless, there are advantages and disadvantages inherent in KE methodologies, as well. For instance, particular phases or components of certain methodologies seem to be better equipped than others to respond to a given problem. However, since KE tools currently available support just one methodology the joint use of these phases or components from different methodologies for the solution of a particular problem is hindered. This paper presents KEManager, a generic meta-tool that facilitates the definition and combined application of phases or components from different methodologies. Although other methodologies could be defined and combined in the KEManager, this paper focuses on the combination of two well-known KE methodologies, CommonKADS and IDEAL, together with the most commonly-applied knowledge acquisition methods. The result is an example of the ad hoc creation of a new methodology from pre-existing methodologies, allowing for the adaptation of the KE process to an organization or domain-specific characteristics. The tool was evaluated by students at Carlos III University of Madrid (Spain). © 2012 World Scientific Publishing Company.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n A kinodynamic planning-learning algorithm for complex robot motor control.\n \n \n \n \n\n\n \n Gonzalez-Quijano, J.; Abderrahim, M.; Fernandez, F.; and Bensalah, C.\n\n\n \n\n\n\n In
2012 IEEE Conference on Evolving and Adaptive Intelligent Systems, pages 80–83, may 2012. IEEE\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{Gonzalez-Quijano2012,
  abstract  = {Robot motor control learning is currently one of the most active research areas in robotics. Many learning techniques have been developed for relatively simple problems. However, very few of them have direct applicability in complex robotic systems without assuming prior knowledge about the task, mainly due to three facts. Firstly, they scale badly to continues and high dimensional problems. Secondly, they need too many real robot-environment interactions. Finally, they are not capable of adapting to environment or robot dynamic changes. In order to overcome these problems, we have developed a new algorithm capable of finding from scratch open-loop state-action trajectory solutions by mixing sample-based tree kinodynamic planning with dynamic model learning. Some results demonstrating the viability of this new type of approach in the cart-pole swing-up task problem are presented. {\textcopyright} 2012 IEEE.},
  author    = {Gonzalez-Quijano, Javier and Abderrahim, Mohamed and Fern{\'{a}}ndez, Fernando and Bensalah, Choukri},
  internal-note = {NOTE(review): Fernandez accented for consistency with the other entries for this author; Gonzalez-Quijano left unaccented as in the original -- verify against the published paper},
  booktitle = {2012 IEEE Conference on Evolving and Adaptive Intelligent Systems},
  doi       = {10.1109/EAIS.2012.6232809},
  file      = {:home/fernando/papers/tmp/06232809.pdf:pdf},
  isbn      = {978-1-4673-1727-6},
  month     = may,
  pages     = {80--83},
  publisher = {IEEE},
  title     = {A kinodynamic planning-learning algorithm for complex robot motor control},
  url       = {http://ieeexplore.ieee.org/document/6232809/},
  year      = {2012}
}
\n
\n\n\n
\n Robot motor control learning is currently one of the most active research areas in robotics. Many learning techniques have been developed for relatively simple problems. However, very few of them have direct applicability in complex robotic systems without assuming prior knowledge about the task, mainly due to three facts. Firstly, they scale badly to continues and high dimensional problems. Secondly, they need too many real robot-environment interactions. Finally, they are not capable of adapting to environment or robot dynamic changes. In order to overcome these problems, we have developed a new algorithm capable of finding from scratch open-loop state-action trajectory solutions by mixing sample-based tree kinodynamic planning with dynamic model learning. Some results demonstrating the viability of this new type of approach in the cart-pole swing-up task problem are presented. © 2012 IEEE.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Calibrating a Motion Model Based on Reinforcement Learning for Pedestrian Simulation.\n \n \n \n \n\n\n \n Martinez-Gil, F.; Lozano, M.; and Fernández, F.\n\n\n \n\n\n\n In
Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics), volume 7660 LNCS, pages 302–313. 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n\n\n\n
\n
@incollection{Martinez-Gil2012b,
  abstract  = {In this paper, the calibration of a framework based in Multi-agent Reinforcement Learning (RL) for generating motion simulations of pedestrian groups is presented. The framework sets a group of autonomous embodied agents that learn to control individually its instant velocity vector in scenarios with collisions and friction forces. The result of the process is a different learned motion controller for each agent. The calibration of both, the physical properties involved in the motion of our embodied agents and the corresponding dynamics, is an important issue for a realistic simulation. The physics engine used has been calibrated with values taken from real pedestrian dynamics. Two experiments have been carried out for testing this approach. The results of the experiments are compared with databases of real pedestrians in similar scenarios. As a comparison tool, the diagram of speed versus density, known as fundamental diagram in the literature, is used. {\textcopyright} 2012 Springer-Verlag Berlin Heidelberg.},
  author    = {Martinez-Gil, Francisco and Lozano, Miguel and Fern{\'{a}}ndez, Fernando},
  booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  doi       = {10.1007/978-3-642-34710-8_28},
  file      = {:home/fernando/papers/tmp/10.1007{\%}2F978-3-642-34710-8{\_}28.pdf:pdf},
  isbn      = {9783642347092},
  issn      = {03029743},
  keywords  = {Pedestrian motion learning,Reinforcement Learning},
  pages     = {302--313},
  title     = {Calibrating a Motion Model Based on Reinforcement Learning for Pedestrian Simulation},
  url       = {http://link.springer.com/10.1007/978-3-642-34710-8{\_}28},
  volume    = {7660 LNCS},
  year      = {2012}
}
\n
\n\n\n
\n In this paper, the calibration of a framework based in Multi-agent Reinforcement Learning (RL) for generating motion simulations of pedestrian groups is presented. The framework sets a group of autonomous embodied agents that learn to control individually its instant velocity vector in scenarios with collisions and friction forces. The result of the process is a different learned motion controller for each agent. The calibration of both, the physical properties involved in the motion of our embodied agents and the corresponding dynamics, is an important issue for a realistic simulation. The physics engine used has been calibrated with values taken from real pedestrian dynamics. Two experiments have been carried out for testing this approach. The results of the experiments are compared with databases of real pedestrians in similar scenarios. As a comparison tool, the diagram of speed versus density, known as fundamental diagram in the literature, is used. © 2012 Springer-Verlag Berlin Heidelberg.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n A prototype-based method for classification with time constraints: a case study on automated planning.\n \n \n \n \n\n\n \n García-Durán, R.; Fernández, F.; and Borrajo, D.\n\n\n \n\n\n\n
Pattern Analysis and Applications, 15(3): 261–277. aug 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Garcia-Duran2012,
  abstract = {The main goal of Nearest Prototype Classification is to reduce storage space and retrieval time of classical Instance-Based Learning (IBL) algorithms. This motivation is higher in relational data since relational distance metrics are much more expensive to compute than classical distances like Euclidean distance. In this paper, we present an algorithm to build Relational Nearest Prototype Classifiers (RNPCs). When compared with Relational Instance-Based Learning (Relational IBL or RIBL) approaches, the algorithm is able to dramatically reduce the number of instances by selecting the most relevant prototypes, maintaining similar accuracy. The number of prototypes is obtained automatically by the algorithm, although it can also be bound by the user. In this work, we also show an application of RNPC for automated planning. Specifically, we describe a modeling task where a relational policy is built following an IBL approach. This approach uses the decisions taken by a planning system as learning examples. We show that when the number of learning examples is reduced with RNPC, the resulting policy is able to scale up better than the original planning system. {\textcopyright} 2010 Springer-Verlag London Limited.},
  author   = {Garc{\'{i}}a-Dur{\'{a}}n, Roc{\'{i}}o and Fern{\'{a}}ndez, Fernando and Borrajo, Daniel},
  doi      = {10.1007/s10044-010-0194-6},
  file     = {:home/fernando/papers/tmp/10.1007{\%}2Fs10044-010-0194-6.pdf:pdf},
  issn     = {1433-7541},
  journal  = {Pattern Analysis and Applications},
  keywords = {Automated planning,Nearest prototype classification,Relational instance-based learning,Relational learning},
  month    = aug,
  number   = {3},
  pages    = {261--277},
  title    = {A prototype-based method for classification with time constraints: a case study on automated planning},
  url      = {http://link.springer.com/10.1007/s10044-010-0194-6},
  volume   = {15},
  year     = {2012}
}
\n
\n\n\n
\n The main goal of Nearest Prototype Classification is to reduce storage space and retrieval time of classical Instance-Based Learning (IBL) algorithms. This motivation is higher in relational data since relational distance metrics are much more expensive to compute than classical distances like Euclidean distance. In this paper, we present an algorithm to build Relational Nearest Prototype Classifiers (RNPCs). When compared with Relational Instance-Based Learning (Relational IBL or RIBL) approaches, the algorithm is able to dramatically reduce the number of instances by selecting the most relevant prototypes, maintaining similar accuracy. The number of prototypes is obtained automatically by the algorithm, although it can also be bound by the user. In this work, we also show an application of RNPC for automated planning. Specifically, we describe a modeling task where a relational policy is built following an IBL approach. This approach uses the decisions taken by a planning system as learning examples. We show that when the number of learning examples is reduced with RNPC, the resulting policy is able to scale up better than the original planning system. © 2010 Springer-Verlag London Limited.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Safe exploration of state and action spaces in reinforcement learning.\n \n \n \n \n\n\n \n Garcia, J.; and Fernández, F.\n\n\n \n\n\n\n
Journal of Artificial Intelligence Research, 45. 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{Garcia2012,\nauthor = {Garc{\\'{i}}a, Javier and Fern{\\'{a}}ndez, Fernando},\ntitle = {Safe Exploration of State and Action Spaces in Reinforcement Learning},\njournal = {Journal of Artificial Intelligence Research},\nyear = {2012},\nvolume = {45},\npages = {515--564},\ndoi = {10.1613/jair.3761},\nissn = {1076-9757},\nurl = {http://jair.org/papers/paper3761.html},\nabstract = {In this paper, we consider the important problem of safe exploration in reinforcement learning. While reinforcement learning is well-suited to domains with complex transition dynamics and high-dimensional state-action spaces, an additional challenge is posed by the need for safe and efficient exploration. Traditional exploration techniques are not particularly useful for solving dangerous tasks, where the trial and error process may lead to the selection of actions whose execution in some states may result in damage to the learning system (or any other system). Consequently, when an agent begins an interaction with a dangerous and high-dimensional state-action space, an important question arises; namely, that of how to avoid (or at least minimize) damage caused by the exploration of the state-action space. We introduce the PI-SRL algorithm which safely improves suboptimal albeit robust behaviors for continuous state and action control tasks and which efficiently learns from the experience gained from the environment. We evaluate the proposed method in four complex tasks: automatic car parking, pole-balancing, helicopter hovering, and business management. {\\textcopyright} 2012 AI Access Foundation.}\n}\n
\n
\n\n\n
\n In this paper, we consider the important problem of safe exploration in reinforcement learning. While reinforcement learning is well-suited to domains with complex transition dynamics and high-dimensional state-action spaces, an additional challenge is posed by the need for safe and efficient exploration. Traditional exploration techniques are not particularly useful for solving dangerous tasks, where the trial and error process may lead to the selection of actions whose execution in some states may result in damage to the learning system (or any other system). Consequently, when an agent begins an interaction with a dangerous and high-dimensional state-action space, an important question arises; namely, that of how to avoid (or at least minimize) damage caused by the exploration of the state-action space. We introduce the PI-SRL algorithm which safely improves suboptimal albeit robust behaviors for continuous state and action control tasks and which efficiently learns from the experience gained from the environment. We evaluate the proposed method in four complex tasks: automatic car parking, pole-balancing, helicopter hovering, and business management. © 2012 AI Access Foundation.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n An Online Utility-Based Approach for Sampling Dynamic Ocean Fields.\n \n \n \n \n\n\n \n Garcia-Olaya, A.; Py, F.; Das, J.; and Rajan, K.\n\n\n \n\n\n\n
IEEE Journal of Oceanic Engineering, 37(2): 185–203. apr 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{Garcia-Olaya2012,\nauthor = {Garc{\\'{i}}a-Olaya, {\\'{A}}ngel and Py, Fr{\\'{e}}d{\\'{e}}ric and Das, Jnaneshwar and Rajan, Kanna},\ntitle = {An Online Utility-Based Approach for Sampling Dynamic Ocean Fields},\njournal = {IEEE Journal of Oceanic Engineering},\nyear = {2012},\nvolume = {37},\nnumber = {2},\npages = {185--203},\nmonth = apr,\ndoi = {10.1109/JOE.2012.2183934},\nissn = {0364-9059},\nurl = {http://ieeexplore.ieee.org/document/6168799/},\nabstract = {The coastal ocean is a dynamic and complex environment due to the confluence of atmospheric, oceanographic, estuarine/riverine, and land-sea interactions. Yet it continues to be undersampled, resulting in poor understanding of dynamic, episodic, and complex phenomena such as harmful algal blooms, anoxic zones, coastal plumes, thin layers, and frontal zones. Often these phenomena have no viable biological or computational models that can provide guidance for sampling. Returning targeted water samples for analysis becomes critical for biologists to assimilate data for model synthesis. In our work, the scientific emphasis on building a species distribution model necessitates spatially distributed sample collection from within hotspots in a large volume of a dynamic field of interest. To do so, we propose an autonomous approach to sample acquisition based on an online calculation of sample utility. A series of reward functions provide a balance between temporal and spatial scales of oceanographic sampling and do so in such a way that science preferences or evolving knowledge about the feature of interest can be incorporated in the decision process. This utility calculation is undertaken onboard a powered autonomous underwater vehicle (AUV) with specialized water samplers for the upper water column. For validation, we provide experimental results using archival AUV data along with an at-sea demonstration in Monterey Bay, CA.}\n}\n
\n
\n\n\n
\n The coastal ocean is a dynamic and complex environment due to the confluence of atmospheric, oceanographic, estuarine/riverine, and land-sea interactions. Yet it continues to be undersampled, resulting in poor understanding of dynamic, episodic, and complex phenomena such as harmful algal blooms, anoxic zones, coastal plumes, thin layers, and frontal zones. Often these phenomena have no viable biological or computational models that can provide guidance for sampling. Returning targeted water samples for analysis becomes critical for biologists to assimilate data for model synthesis. In our work, the scientific emphasis on building a species distribution model necessitates spatially distributed sample collection from within hotspots in a large volume of a dynamic field of interest. To do so, we propose an autonomous approach to sample acquisition based on an online calculation of sample utility. A series of reward functions provide a balance between temporal and spatial scales of oceanographic sampling and do so in such a way that science preferences or evolving knowledge about the feature of interest can be incorporated in the decision process. This utility calculation is undertaken onboard a powered autonomous underwater vehicle (AUV) with specialized water samplers for the upper water column. For validation, we provide experimental results using archival AUV data along with an at-sea demonstration in Monterey Bay, CA.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n A Survey of the Seventh International Planning Competition.\n \n \n \n \n\n\n \n Coles, A.; Coles, A.; García-Olaya, A.; Jiménez, S.; Linares López, C.; Sanner, S.; and Yoon, S.\n\n\n \n\n\n\n
AI Magazine, 33(1): 83–88. 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n \n doi\n \n \n\n \n link\n \n \n\n bibtex\n \n\n \n \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{aim12,\nauthor = {Coles, Amanda and Coles, Andrew and Garc{\\'{i}}a-Olaya, {\\'{A}}ngel and Jim{\\'{e}}nez, Sergio and {Linares L{\\'{o}}pez}, Carlos and Sanner, Scott and Yoon, Sungwook},\ntitle = {A Survey of the Seventh International Planning Competition},\njournal = {AI Magazine},\nyear = {2012},\nvolume = {33},\nnumber = {1},\npages = {83--88},\ndoi = {10.1609/aimag.v33i1.2392},\nissn = {0738-4602},\nurl = {http://www.aaai.org/ojs/index.php/aimagazine/article/view/2392},\nabstract = {In this article we review the 2011 International Planning Competition. We give an overview of the history of the competition, discussing how it has developed since its first edition in 1998. The 2011 competition was run in three main separate tracks: the deterministic (classical) track; the learning track; and the uncertainty track. Each track proposed its own distinct set of new challenges and the participants rose to these admirably, the results of each track showing promising progress in each area. The competition attracted a record number of participants this year, showing its continued and strong position as a major central pillar of the international planning research community.}\n}\n\n\n\n
\n
\n\n\n
\n In this article we review the 2011 International Planning Competition. We give an overview of the history of the competition, discussing how it has developed since its first edition in 1998. The 2011 competition was run in three main separate tracks: the deterministic (classical) track; the learning track; and the uncertainty track. Each track proposed its own distinct set of new challenges and the participants rose to these admirably, the results of each track showing promising progress in each area. The competition attracted a record number of participants this year, showing its continued and strong position as a major central pillar of the international planning research community.\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n A Review of Machine Learning for Automated Planning.\n \n \n \n \n\n\n \n Jiménez, S.; de la Rosa, T.; Fernández, S.; Fernández, F.; and Borrajo, D.\n\n\n \n\n\n\n
The Knowledge Engineering Review, 27(4): 433–467. December 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{kereview-ml,\nauthor = {Jim{\\'{e}}nez, Sergio and de la Rosa, Tom{\\'{a}}s and Fern{\\'{a}}ndez, Susana and Fern{\\'{a}}ndez, Fernando and Borrajo, Daniel},\ntitle = {A Review of Machine Learning for Automated Planning},\njournal = {The Knowledge Engineering Review},\nyear = {2012},\nvolume = {27},\nnumber = {4},\npages = {433--467},\nmonth = dec,\ndoi = {10.1017/S026988891200001X},\npublisher = {Cambridge University Press},\nkey = {Planning-Learning},\ncicyt = {revista},\njcr = {Q4, 2012: 0.590 (91/115)},\noptjcr = {2004: 1.237 (28/78), 2005: 2.179 (16/79), 2006: 0.930 (43/85), 2007: 1.312 (35/93)},\noptannote = {ISSN: 0269-8889}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n A Prototype-Based Method for Classification with Time Constraints: A Case Study on Automated Planning.\n \n \n \n \n\n\n \n García-Durán, R.; Fernández, F.; and Borrajo, D.\n\n\n \n\n\n\n
Pattern Analysis and Applications Journal, 15(3): 261–277. 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{paa12,\nauthor = {Garc{\\'{i}}a-Dur{\\'{a}}n, Roc{\\'{i}}o and Fern{\\'{a}}ndez, Fernando and Borrajo, Daniel},\ntitle = {A Prototype-Based Method for Classification with Time Constraints: A Case Study on Automated Planning},\njournal = {Pattern Analysis and Applications},\nyear = {2012},\nvolume = {15},\nnumber = {3},\npages = {261--277},\ndoi = {10.1007/s10044-010-0194-6},\npublisher = {Springer},\nkey = {Planning-Learning},\ncicyt = {revista},\njcr = {Q3, 2012: 0.814 (78/115)}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n Probabilistically Reusing Plans in Deterministic Planning.\n \n \n \n\n\n \n Borrajo, D.; and Veloso, M.\n\n\n \n\n\n\n In , editor(s),
Proceedings of ICAPS'12 workshop on Heuristics and Search for Domain-Independent Planning, pages 17-25, Atibaia (Brazil), 2012. \n
\n\n
\n\n
\n\n
\n\n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{workshop-icaps12-errtplan,\nauthor = {Daniel Borrajo and Manuela Veloso},\ntitle = {Probabilistically Reusing Plans in Deterministic Planning},\nbooktitle = {Proceedings of ICAPS'12 workshop on Heuristics and Search for Domain-Independent Planning},\nyear = {2012},\npages = {17--25},\naddress = {Atibaia (Brazil)},\nkey = {Planning-Learning},\nmyurl = {http://icaps12.icaps-conference.org/workshops/hsdip2012-proceedings.pdf},\ncicyt = {workshops}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n PELEA: a Domain-Independent Architecture for Planning, Execution and Learning.\n \n \n \n \n\n\n \n Guzmán, C.; Alcázar, V.; Prior, D.; Onaindía, E.; Borrajo, D.; Fdez-Olivares, J.; and Quintero, E.\n\n\n \n\n\n\n In , editor(s),
Proceedings of ICAPS'12 Scheduling and Planning Applications woRKshop (SPARK), pages 38-45, Atibaia (Brazil), 2012. AAAI Press\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{spark12,\nauthor = {C{\\'{e}}sar Guzm{\\'{a}}n and Vidal Alc{\\'{a}}zar and David Prior and Eva Onaind{\\'{i}}a and Daniel Borrajo and Juan Fdez-Olivares and Ezequiel Quintero},\ntitle = {{PELEA}: a Domain-Independent Architecture for Planning, Execution and Learning},\nbooktitle = {Proceedings of ICAPS'12 Scheduling and Planning Applications woRKshop (SPARK)},\nyear = {2012},\npages = {38--45},\naddress = {Atibaia (Brazil)},\npublisher = {AAAI Press},\nkey = {Planning-Learning},\nurl = {spark12.pdf},\ncicyt = {workshops}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Variable resolution planning through predicate relaxation.\n \n \n \n \n\n\n \n Martínez, M.; Fernández, F.; and Borrajo, D.\n\n\n \n\n\n\n In , editor(s),
Proceedings of ICAPS'12 workshop on Planning and Plan Execution for Real-World Systems: Principles and Practices (PlanEx), pages 5–12, Atibaia (Brazil), 2012. \n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{planex2012,\nauthor = {Mois{\\'{e}}s Mart{\\'{i}}nez and Fernando Fern{\\'{a}}ndez and Daniel Borrajo},\ntitle = {Variable resolution planning through predicate relaxation},\nbooktitle = {Proceedings of ICAPS'12 workshop on Planning and Plan Execution for Real-World Systems: Principles and Practices (PlanEx)},\nyear = {2012},\npages = {5--12},\naddress = {Atibaia (Brazil)},\nkey = {Planning-Learning},\nurl = {planex2012.pdf},\ncicyt = {workshops}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Modeling Motivations, Personality Traits and Emotional States in Deliberative Agents Based on Automated Planning.\n \n \n \n \n\n\n \n Pérez, D.; Fernández, S.; and Borrajo, D.\n\n\n \n\n\n\n In Filipe, J.; and Fred, A., editor(s),
3rd International Conference on Agents and Artificial Intelligence (ICAART 2011), volume CCIS 271, of
Lecture Notes on Communications in Computer and Information Science, pages 146–160, Heidelberg, 2012. Springer Verlag\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@inproceedings{icaart12-ln,\nauthor = {Daniel P{\\'{e}}rez and Susana Fern{\\'{a}}ndez and Daniel Borrajo},\ntitle = {Modeling Motivations, Personality Traits and Emotional States in Deliberative Agents Based on Automated Planning},\nbooktitle = {3rd International Conference on Agents and Artificial Intelligence (ICAART 2011)},\neditor = {Filipe, Joaquim and Fred, Ana},\nseries = {Communications in Computer and Information Science},\nvolume = {271},\nyear = {2012},\npages = {146--160},\naddress = {Heidelberg},\npublisher = {Springer Verlag},\nkey = {Planning-Learning},\nurl = {icaart12-ln.pdf},\ncicyt = {lncs},\njcr = {C}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n
\n
\n\n \n \n \n \n \n \n Using Linear Programming to solve Clustered Oversubscription Planning Problems for Designing e-Courses.\n \n \n \n \n\n\n \n Fernández, S.; and Borrajo, D.\n\n\n \n\n\n\n
Expert Systems with Applications, 39(5): 5178–5188. April 2012.\n
\n\n
\n\n
\n\n
\n\n \n \n Paper\n \n \n\n \n\n \n link\n \n \n\n bibtex\n \n\n \n\n \n \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n \n \n \n\n\n\n
\n
@article{expertsystems12,\nauthor = {Susana Fern{\\'{a}}ndez and Daniel Borrajo},\ntitle = {Using Linear Programming to solve Clustered Oversubscription Planning Problems for Designing e-Courses},\njournal = {Expert Systems with Applications},\nyear = {2012},\nvolume = {39},\nnumber = {5},\npages = {5178--5188},\nmonth = apr,\ndoi = {10.1016/j.eswa.2011.11.021},\npublisher = {Elsevier},\nkey = {Planning-Learning},\ncicyt = {revista},\njcr = {Q1, 2012: 1.854 (31/115) En Categor{\\'{i}}a Engineering, Electrical \\& Electronic 56/243, En Categor{\\'{i}}a Operations Research \\& Management Science 13/79},\noptjcr = {2004: 1.247 (26/78), 2005: 1.236 (32/79), 2006: 0.957 (41/85), 2007: 1.177 (40/93), 2008: 2.596 (17/94), En Categor{\\'{i}}a Operations Research \\& Management Science: 2007 (11/60), 2008 (1/64)},\noptannote = {ISSN: 0957-4174}\n}\n\n\n\n
\n
\n\n\n\n
\n\n\n\n\n\n