var bibbase_data = {"data":"\"Loading..\"\n\n
\n\n \n\n \n\n \n \n\n \n\n \n \n\n \n\n \n
\n generated by\n \n \"bibbase.org\"\n\n \n
\n \n\n
\n\n \n\n\n
\n\n Excellent! Next you can\n create a new website with this list, or\n embed it in an existing web page by copying & pasting\n any of the following snippets.\n\n
\n JavaScript\n (easiest)\n
\n \n <script src=\"https://bibbase.org/show?bib=https%3A%2F%2Fpeople.orie.cornell.edu%2Fshane%2FShanePubs.bib&jsonp=1&jsonp=1\"></script>\n \n
\n\n PHP\n
\n \n <?php\n $contents = file_get_contents(\"https://bibbase.org/show?bib=https%3A%2F%2Fpeople.orie.cornell.edu%2Fshane%2FShanePubs.bib&jsonp=1\");\n print_r($contents);\n ?>\n \n
\n\n iFrame\n (not recommended)\n
\n \n <iframe src=\"https://bibbase.org/show?bib=https%3A%2F%2Fpeople.orie.cornell.edu%2Fshane%2FShanePubs.bib&jsonp=1\"></iframe>\n \n
\n\n

\n For more details see the documention.\n

\n
\n
\n\n
\n\n This is a preview! To use this list on your own web site\n or create a new web site from it,\n create a free account. The file will be added\n and you will be able to edit it in the File Manager.\n We will show you instructions once you've created your account.\n
\n\n
\n\n

To the site owner:

\n\n

Action required! Mendeley is changing its\n API. In order to keep using Mendeley with BibBase past April\n 14th, you need to:\n

    \n
  1. renew the authorization for BibBase on Mendeley, and
  2. \n
  3. update the BibBase URL\n in your page the same way you did when you initially set up\n this page.\n
  4. \n
\n

\n\n

\n \n \n Fix it now\n

\n
\n\n
\n\n\n
\n \n \n
\n
\n  \n 2024\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n The Effect of Network Skewness on Epidemic Spread in Communities.\n \n \n \n\n\n \n Cornwell, B. T.; Ji, S.; Henderson, S. G.; and Meredith, G.\n\n\n \n\n\n\n 2024.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{coretal24,\n\tauthor = {Benjamin T. Cornwell and Shiyu Ji and Shane G. Henderson and Gen Meredith},\n\tdate-added = {2024-03-18 07:50:12 +1030},\n\tdate-modified = {2024-03-18 07:50:54 +1030},\n\ttitle = {The Effect of Network Skewness on Epidemic Spread in Communities},\n\tyear = {2024}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Modeling the Risk of In-Person Instruction during the COVID-19 Pandemic.\n \n \n \n\n\n \n Liu, B.; Zhang, Y.; Henderson, S. G.; Shmoys, D. B.; and Frazier, P. I.\n\n\n \n\n\n\n INFORMS Journal on Applied Analytics,To appear. 2024.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{liuetal23,\n\tabstract = {During the COVID-19 pandemic, implementing in-person indoor instruction in a safe manner was a high\npriority for universities nationwide. To support this effort at the University, we developed a mathematical\nmodel for estimating the risk of SARS-CoV-2 transmission in university classrooms. This model was used\nto design a safe classroom environment at the University during the COVID-19 pandemic that supported\nthe higher occupancy levels needed to match pre-pandemic numbers of in-person courses, despite a limited\nnumber of large classrooms. A retrospective analysis at the end of the semester confirmed the model's\nassessment that the proposed classroom configuration would be safe. Our framework is generalizable and\nwas also used to support reopening decisions at Stanford University. In addition, our methods are flexible;\nour modeling framework was repurposed to plan for large university events and gatherings. We found that\nour approach and methods work in a wide range of indoor settings and could be used to support reopening\nplanning across various industries, from secondary schools to movie theaters and restaurants.},\n\tauthor = {Brian Liu and Yujia Zhang and Shane G. Henderson and David B. Shmoys and Peter I. Frazier},\n\tdate-added = {2023-10-07 07:44:15 -0400},\n\tdate-modified = {2024-04-09 12:10:01 -0500},\n\tjournal = {{INFORMS} Journal on Applied Analytics},\n\tpages = {To appear},\n\ttitle = {Modeling the Risk of In-Person Instruction during the COVID-19 Pandemic},\n\tyear = {2024}}\n\n
\n
\n\n\n
\n During the COVID-19 pandemic, implementing in-person indoor instruction in a safe manner was a high priority for universities nationwide. To support this effort at the University, we developed a mathematical model for estimating the risk of SARS-CoV-2 transmission in university classrooms. This model was used to design a safe classroom environment at the University during the COVID-19 pandemic that supported the higher occupancy levels needed to match pre-pandemic numbers of in-person courses, despite a limited number of large classrooms. A retrospective analysis at the end of the semester confirmed the model's assessment that the proposed classroom configuration would be safe. Our framework is generalizable and was also used to support reopening decisions at Stanford University. In addition, our methods are flexible; our modeling framework was repurposed to plan for large university events and gatherings. We found that our approach and methods work in a wide range of indoor settings and could be used to support reopening planning across various industries, from secondary schools to movie theaters and restaurants.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Community first response for cardiac arrest: comparing phased dispatch policies through Monte Carlo simulation.\n \n \n \n \n\n\n \n van den Berg , P. L.; Henderson, S. G.; Li, H.; Dicker, B.; and Jagtenberg, C. J.\n\n\n \n\n\n\n 2024.\n Under revision\n\n\n\n
\n\n\n\n \n \n \"CommunityPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{beretal24,\n\tabstract = {Background: Advanced Community First Responder (CFR) systems send so-called phased alerts: notifications with built-in time delays. The policy that defines these delays affects response times, CFR workload and the number of redundant CFR arrivals.\n\nMethods: We compare policies by Monte Carlo Simulation, estimating the three metrics above. We bootstrap acceptance probabilities and response delays from 29,307 rows of historical data covering all GoodSAM alerts in New Zealand between 1-12-2017 and 30-11-2020. We simulate distances between the patient and CFRs by assuming that CFRs are located uniformly at random in a 1 km circle around the patient, for different CFR densities. Our simulated CFRs travel with a distance-dependent speed that was estimated by linear regression on observed speeds among those responders in the abovementioned data set that eventually reached the patient.\n\nResults: The alerting policy has a large impact on the expected number of alerts sent, the redundant arrivals and the probability of patient survival. CFR app managers can use our results to identify a policy that displays a desirable trade-off between these performance measures.},\n\tauthor = {Pieter L. {van den Berg} and Shane G. Henderson and Hemeng Li and Bridget Dicker and Caroline J. Jagtenberg},\n\tdate-added = {2023-03-20 16:14:33 -0400},\n\tdate-modified = {2024-01-29 12:21:02 +1300},\n\tnote = {Under revision},\n\ttitle = {Community first response for cardiac arrest: comparing phased dispatch policies through {Monte} {Carlo} simulation},\n\turl = {https://www.medrxiv.org/content/10.1101/2024.01.17.24301457v1},\n\tyear = {2024},\n\tbdsk-url-1 = {https://doi.org/10.1007/s11134-022-09752-z}}\n\n
\n
\n\n\n
\n Background: Advanced Community First Responder (CFR) systems send so-called phased alerts: notifications with built-in time delays. The policy that defines these delays affects response times, CFR workload and the number of redundant CFR arrivals. Methods: We compare policies by Monte Carlo Simulation, estimating the three metrics above. We bootstrap acceptance probabilities and response delays from 29,307 rows of historical data covering all GoodSAM alerts in New Zealand between 1-12-2017 and 30-11-2020. We simulate distances between the patient and CFRs by assuming that CFRs are located uniformly at random in a 1 km circle around the patient, for different CFR densities. Our simulated CFRs travel with a distance-dependent speed that was estimated by linear regression on observed speeds among those responders in the abovementioned data set that eventually reached the patient. Results: The alerting policy has a large impact on the expected number of alerts sent, the redundant arrivals and the probability of patient survival. CFR app managers can use our results to identify a policy that displays a desirable trade-off between these performance measures.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Modeling the Impact of Community First Responders.\n \n \n \n \n\n\n \n van den Berg , P. L.; Henderson, S. G.; Jagtenberg, C. J.; and Li, H.\n\n\n \n\n\n\n Management Science. 2024.\n To appear\n\n\n\n
\n\n\n\n \n \n \"Modeling paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{berhenjagli21,\n\tabstract = {In Community First Responder (CFR) systems, traditional emergency service response is augmented by a network of trained volunteers who are dispatched via an app. A central application of such systems is out-of-hospital cardiac arrest (OHCA), where a very fast response is crucial. For a target performance level, how many volunteers are needed and from which locations should they be recruited? We model the presence of volunteers throughout a region as a Poisson point process, which permits the computation of the response-time distribution of the first-arriving volunteer. Combining this with known survival-rate functions, we deduce survival probabilities in the cardiac arrest setting. We then use convex optimization to compute a location distribution of volunteers across the region that optimizes either the fraction of incidents with a fast response (a common measure in the industry) or patient survival in the case of OHCA. The optimal location distribution provides a bound on the best possible performance with a given number of volunteers. This can be used to determine whether introducing a CFR system in a new region is worthwhile, or serve as a guide for additional recruitment in existing systems. Effective target areas for recruitment are not always obvious, since volunteers recruited from one area may be found in various areas across the city depending on the time of day; we explicitly capture this issue. We demonstrate these methods through an extended case study of Auckland, New Zealand.},\n\tauthor = {Pieter L. {van den Berg} and Shane G. Henderson and Caroline J. Jagtenberg and Hemeng Li},\n\tdate-added = {2022-12-21 05:42:30 -0500},\n\tdate-modified = {2024-01-28 11:34:00 +1300},\n\tjournal = {Management Science},\n\tnote = {To appear},\n\ttitle = {Modeling the Impact of Community First Responders},\n\turl_paper = {http://ssrn.com/abstract=3825060},\n\tyear = {2024}}\n\n
\n
\n\n\n
\n In Community First Responder (CFR) systems, traditional emergency service response is augmented by a network of trained volunteers who are dispatched via an app. A central application of such systems is out-of-hospital cardiac arrest (OHCA), where a very fast response is crucial. For a target performance level, how many volunteers are needed and from which locations should they be recruited? We model the presence of volunteers throughout a region as a Poisson point process, which permits the computation of the response-time distribution of the first-arriving volunteer. Combining this with known survival-rate functions, we deduce survival probabilities in the cardiac arrest setting. We then use convex optimization to compute a location distribution of volunteers across the region that optimizes either the fraction of incidents with a fast response (a common measure in the industry) or patient survival in the case of OHCA. The optimal location distribution provides a bound on the best possible performance with a given number of volunteers. This can be used to determine whether introducing a CFR system in a new region is worthwhile, or serve as a guide for additional recruitment in existing systems. Effective target areas for recruitment are not always obvious, since volunteers recruited from one area may be found in various areas across the city depending on the time of day; we explicitly capture this issue. We demonstrate these methods through an extended case study of Auckland, New Zealand.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2023\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Batch Synthesis in High-Dimensional Classification Active Learning via Latent Gradient Optimization.\n \n \n \n\n\n \n Ford, M. T.; Henderson, S. G.; Malof, J.; and Ren, S. (.\n\n\n \n\n\n\n 2023.\n Submitted\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{foretal23,\n\tauthor = {Matthew T. Ford and Shane G. Henderson and Jordan Malof and Simiao (Ben) Ren},\n\tdate-added = {2023-10-17 13:07:34 -0400},\n\tdate-modified = {2023-10-17 13:08:49 -0400},\n\tnote = {Submitted},\n\ttitle = {Batch Synthesis in High-Dimensional Classification Active Learning via Latent Gradient Optimization},\n\tyear = {2023}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Practical impact and academia are not antonyms.\n \n \n \n\n\n \n Henderson, S. G.\n\n\n \n\n\n\n In Corlu, C. G.; Hunter, S. R.; Lam, H.; Onggo, B. S.; Shortle, J.; and Biller, B., editor(s), Proceedings of the 2023 Winter Simulation Conference, pages Submitted, Piscataway, New Jersey, 2023. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hen23,\n\tabstract = {This tutorial discusses principles and strategies for the interplay between applied work with organizations and an academic research agenda. I emphasize lessons I have learned through my own work and my own mistakes, with special focus on some high-stakes settings, including advising Cornell University's response to the COVID-19 pandemic and work with the emergency services, among other applications.\n},\n\taddress = {Piscataway, New Jersey},\n\tauthor = {Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2023 Winter Simulation Conference},\n\tdate-added = {2023-05-05 13:46:37 -0400},\n\tdate-modified = {2023-05-05 13:49:07 -0400},\n\teditor = {C. G. Corlu and S. R. Hunter and H. Lam and B. S. Onggo and J. Shortle and B. Biller},\n\tpages = {Submitted},\n\tpublisher = {IEEE},\n\ttitle = {Practical impact and academia are not antonyms},\n\tyear = {2023}}\n\n
\n
\n\n\n
\n This tutorial discusses principles and strategies for the interplay between applied work with organizations and an academic research agenda. I emphasize lessons I have learned through my own work and my own mistakes, with special focus on some high-stakes settings, including advising Cornell University's response to the COVID-19 pandemic and work with the emergency services, among other applications. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SPT optimality (mostly) via linear programming.\n \n \n \n \n\n\n \n Cho, W.; Shmoys, D. B.; and Henderson, S. G.\n\n\n \n\n\n\n Operations Research Letters, 51: 99-104. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"SPTPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{choshmhen22,\n\tabstract = {One of the oldest results in scheduling theory is that the Shortest Processing Time (SPT) rule finds an\noptimal solution to the problem of scheduling jobs on identical parallel machines to minimize average\njob completion times. We present a new proof of correctness of SPT based on linear programming (LP).\nOur proof relies on a generalization of a single-machine result that yields an equivalence between two\nscheduling problems. We first identify and solve an appropriate variant of our problem, then map its\nsolutions to solutions for our original problem to establish SPT optimality. Geometric insights used\ntherein may find further uses; we demonstrate two applications of the same principle in generalized\nsettings.},\n\tauthor = {Woo-Hyung Cho and David B. Shmoys and Shane G. Henderson},\n\tdate-added = {2022-02-07 14:53:59 -0500},\n\tdate-modified = {2024-04-04 15:47:05 +1030},\n\tjournal = {Operations Research Letters},\n\tpages = {99-104},\n\ttitle = {{SPT} optimality (mostly) via linear programming},\n\turl = {https://doi.org/10.1016/j.orl.2022.12.007},\n\tvolume = {51},\n\tyear = {2023},\n\tbdsk-url-1 = {https://doi.org/10.1016/j.orl.2022.12.007}}\n\n
\n
\n\n\n
\n One of the oldest results in scheduling theory is that the Shortest Processing Time (SPT) rule finds an optimal solution to the problem of scheduling jobs on identical parallel machines to minimize average job completion times. We present a new proof of correctness of SPT based on linear programming (LP). Our proof relies on a generalization of a single-machine result that yields an equivalence between two scheduling problems. We first identify and solve an appropriate variant of our problem, then map its solutions to solutions for our original problem to establish SPT optimality. Geometric insights used therein may find further uses; we demonstrate two applications of the same principle in generalized settings.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Diagnostic Tools for Evaluating and Comparing Simulation-Optimization Algorithms.\n \n \n \n \n\n\n \n Eckman, D. J.; Henderson, S. G.; and Shashaani, S.\n\n\n \n\n\n\n INFORMS Journal on Computing, 35(2): 350-367. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"DiagnosticPaper\n  \n \n \n \"Diagnostic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 11 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{eckhensha21,\n\tabstract = {Simulation optimization involves the optimization of some objective function that can only be estimated via stochastic simulation. Many important problems can be profitably viewed within this framework. While many solvers - implementations of simulation-optimization algorithms - exist or are in development, comparisons among solvers are not standardized and are often limited in scope. Such comparisons help advance solver development, clarify the relative performance of solvers and identify classes of problems that defy efficient solution, among many other uses. We develop performance measures and plots, and estimators thereof, that can be used to evaluate and compare solvers on a testbed of simulation-optimization problems. We explain the need for two-level simulation in this context and provide supporting theory to guide the use of common random numbers to achieve efficient comparisons. We also describe how to use bootstrapping to obtain error estimates for the estimators.},\n\tauthor = {David J. Eckman and Shane G. Henderson and Sara Shashaani},\n\tdate-added = {2022-01-08 11:02:28 -0500},\n\tdate-modified = {2023-04-13 08:11:22 -0400},\n\tjournal = {{INFORMS} Journal on Computing},\n\tnumber = {2},\n\tpages = {350-367},\n\ttitle = {Diagnostic Tools for Evaluating and Comparing Simulation-Optimization Algorithms},\n\turl = {https://doi.org/10.1287/ijoc.2022.1261},\n\turl_paper = {pubs/comparingsimopt.pdf},\n\tvolume = {35},\n\tyear = {2023},\n\tbdsk-url-1 = {https://doi.org/10.1287/ijoc.2022.1261}}\n\n
\n
\n\n\n
\n Simulation optimization involves the optimization of some objective function that can only be estimated via stochastic simulation. Many important problems can be profitably viewed within this framework. While many solvers - implementations of simulation-optimization algorithms - exist or are in development, comparisons among solvers are not standardized and are often limited in scope. Such comparisons help advance solver development, clarify the relative performance of solvers and identify classes of problems that defy efficient solution, among many other uses. We develop performance measures and plots, and estimators thereof, that can be used to evaluate and compare solvers on a testbed of simulation-optimization problems. We explain the need for two-level simulation in this context and provide supporting theory to guide the use of common random numbers to achieve efficient comparisons. We also describe how to use bootstrapping to obtain error estimates for the estimators.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SimOpt: A testbed for simulation-optimization experiments.\n \n \n \n \n\n\n \n Eckman, D. J.; Henderson, S. G.; and Shashaani, S.\n\n\n \n\n\n\n INFORMS Journal on Computing, 35(2): 495-508. 2023.\n \n\n\n\n
\n\n\n\n \n \n \"SimOpt:Paper\n  \n \n \n \"SimOpt: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 7 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{eckhensha22,\n\tabstract = {This paper introduces a major redesign of SimOpt, a testbed of simulation-optimization (SO) problems and solvers. The testbed promotes the empirical evaluation and comparison of solvers and aims to accelerate their development. Relative to previous versions of SimOpt, the redesign ports the code to an object-oriented architecture in Python; includes an implementation of the MRG32k3a random-number generator that supports streams, substreams and subsubstreams; supports the automated use of common random numbers for efficiency; includes a powerful suite of plotting tools for visualizing experiment results;  uses bootstrapping to obtain error estimates; accommodates the use of data farming to explore simulation models and optimization solvers as their input parameters vary;  and provides a graphical user interface. The SimOpt source code is available on a GitHub repository under a permissive open-source license.},\n\tauthor = {David J. Eckman and Shane G. Henderson and Sara Shashaani},\n\tdate-added = {2020-09-19 11:50:09 -0400},\n\tdate-modified = {2023-07-14 11:27:08 -0400},\n\tjournal = {{INFORMS} Journal on Computing},\n\tnumber = {2},\n\tpages = {495-508},\n\ttitle = {{SimOpt}: A testbed for simulation-optimization experiments},\n\turl = {https://doi.org/10.1287/ijoc.2023.1273},\n\turl_paper = {pubs/comparingsimopt.pdf},\n\tvolume = {35},\n\tyear = {2023},\n\tbdsk-url-1 = {https://doi.org/10.1287/ijoc.2023.1273}}\n\n
\n
\n\n\n
\n This paper introduces a major redesign of SimOpt, a testbed of simulation-optimization (SO) problems and solvers. The testbed promotes the empirical evaluation and comparison of solvers and aims to accelerate their development. Relative to previous versions of SimOpt, the redesign ports the code to an object-oriented architecture in Python; includes an implementation of the MRG32k3a random-number generator that supports streams, substreams and subsubstreams; supports the automated use of common random numbers for efficiency; includes a powerful suite of plotting tools for visualizing experiment results; uses bootstrapping to obtain error estimates; accommodates the use of data farming to explore simulation models and optimization solvers as their input parameters vary; and provides a graphical user interface. The SimOpt source code is available on a GitHub repository under a permissive open-source license.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Stochastic constraints: How feasible is feasible?.\n \n \n \n\n\n \n Eckman, D. J.; Henderson, S. G.; and Shashaani, S.\n\n\n \n\n\n\n In Corlu, C. G.; Hunter, S. R.; Lam, H.; Onggo, B. S.; Shortle, J.; and Biller, B., editor(s), Proceedings of the 2023 Winter Simulation Conference, pages Submitted, Piscataway, New Jersey, 2023. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{eckhensha23,\n\tabstract = {Stochastic constraints, which constrain an expectation in the context of simulation optimization, can be hard to conceptualize and harder still to assess. As with a deterministic constraint, a solution is considered either feasible or infeasible with respect to a stochastic constraint. This perspective belies the subjective nature of stochastic constraints, which often arise when attempting to avoid alternative optimization formulations with multiple objectives or an aggregate objective with weights. Moreover, a solution's feasibility with respect to a stochastic constraint cannot, in general, be ascertained based on only a finite number of simulation replications. We introduce different means of estimating how ``close'' the expected performance of a given solution is to being feasible with respect to one or more stochastic constraints. Some of these metrics entail a modest amount of numerical optimization. We explore how these metrics and their bootstrapped error estimates can be incorporated into plots showing a solver's progress over time when solving a stochastically constrained problem.\n},\n\taddress = {Piscataway, New Jersey},\n\tauthor = {David J. Eckman and Shane G. Henderson and Sara Shashaani},\n\tbooktitle = {Proceedings of the 2023 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2023-05-05 13:49:46 -0400},\n\teditor = {C. G. Corlu and S. R. Hunter and H. Lam and B. S. Onggo and J. Shortle and B. Biller},\n\tpages = {Submitted},\n\tpublisher = {IEEE},\n\ttitle = {Stochastic constraints: How feasible is feasible?},\n\tyear = {2023}}\n\n
\n
\n\n\n
\n Stochastic constraints, which constrain an expectation in the context of simulation optimization, can be hard to conceptualize and harder still to assess. As with a deterministic constraint, a solution is considered either feasible or infeasible with respect to a stochastic constraint. This perspective belies the subjective nature of stochastic constraints, which often arise when attempting to avoid alternative optimization formulations with multiple objectives or an aggregate objective with weights. Moreover, a solution's feasibility with respect to a stochastic constraint cannot, in general, be ascertained based on only a finite number of simulation replications. We introduce different means of estimating how ``close'' the expected performance of a given solution is to being feasible with respect to one or more stochastic constraints. Some of these metrics entail a modest amount of numerical optimization. We explore how these metrics and their bootstrapped error estimates can be incorporated into plots showing a solver's progress over time when solving a stochastically constrained problem. \n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2022\n \n \n (10)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n How should volunteers be dispatched to out-of-hospital cardiac arrest cases?.\n \n \n \n \n\n\n \n Henderson, S. G.; van den Berg , P. L.; Jagtenberg, C. J.; and Li, H.\n\n\n \n\n\n\n Queueing Systems, 100(3-4): 437-439. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"HowPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{berhenjagli22,\n\tauthor = {Shane G. Henderson and Pieter L. {van den Berg} and Caroline J. Jagtenberg and Hemeng Li},\n\tdate-added = {2024-01-29 12:10:51 +1300},\n\tdate-modified = {2024-01-29 12:10:51 +1300},\n\tjournal = {Queueing Systems},\n\tnumber = {3-4},\n\tpages = {437-439},\n\ttitle = {How should volunteers be dispatched to out-of-hospital cardiac arrest cases?},\n\turl = {https://doi.org/10.1007/s11134-022-09752-z},\n\tvolume = {100},\n\tyear = {2022},\n\tbdsk-url-1 = {https://doi.org/10.1007/s11134-022-09752-z}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Posterior-based stopping rules for Bayesian ranking-and-selection procedures.\n \n \n \n \n\n\n \n Eckman, D. J.; and Henderson, S. G.\n\n\n \n\n\n\n INFORMS Journal on Computing, 34(3): 1711-1728. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Posterior-basedPaper\n  \n \n \n \"Posterior-based paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 9 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{eckhen20b,\n\tabstract = {Sequential ranking-and-selection procedures deliver Bayesian guarantees by repeatedly evaluating a posterior quantity of interest - e.g., the posterior probability of correct selection or posterior expected opportunity cost - and terminating when this quantity crosses some threshold. We discuss the perceived shortcomings of the posterior probability of correct selection guarantee. Motivated by settings in which there are many alternatives, we develop several methods for improving the computational efficiency of exactly checking these kinds of posterior-based stopping rules. We demonstrate via simulation experiments that the proposed methods can, in some instances, significantly reduce the number of simulation replications taken compared to procedures that instead use cheaply computable bounds on the posterior quantity of interest, e.g., those based on Bonferroni's or Slepian's inequalities. We further show that the use of a Monte Carlo estimate of the posterior quantity of interest allows for these savings to be gained with little added computational effort.},\n\tauthor = {David J. Eckman and Shane G. Henderson},\n\tdate-added = {2022-12-21 05:52:00 -0500},\n\tdate-modified = {2022-12-21 05:52:00 -0500},\n\tjournal = {{INFORMS} Journal on Computing},\n\tnumber = {3},\n\tpages = {1711-1728},\n\ttitle = {Posterior-based stopping rules for {B}ayesian ranking-and-selection procedures},\n\turl = {https://doi.org/10.1287/ijoc.2021.1132},\n\turl_paper = {pubs/posterior_based_stopping.pdf},\n\tvolume = {34},\n\tyear = {2022},\n\tbdsk-url-1 = {https://doi.org/10.1287/ijoc.2021.1132}}\n\n
\n
\n\n\n
\n Sequential ranking-and-selection procedures deliver Bayesian guarantees by repeatedly evaluating a posterior quantity of interest - e.g., the posterior probability of correct selection or posterior expected opportunity cost - and terminating when this quantity crosses some threshold. We discuss the perceived shortcomings of the posterior probability of correct selection guarantee. Motivated by settings in which there are many alternatives, we develop several methods for improving the computational efficiency of exactly checking these kinds of posterior-based stopping rules. We demonstrate via simulation experiments that the proposed methods can, in some instances, significantly reduce the number of simulation replications taken compared to procedures that instead use cheaply computable bounds on the posterior quantity of interest, e.g., those based on Bonferroni's or Slepian's inequalities. We further show that the use of a Monte Carlo estimate of the posterior quantity of interest allows for these savings to be gained with little added computational effort.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Scheduling with predictions.\n \n \n \n \n\n\n \n Cho, W.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n Submitted. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Scheduling paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{chohenshm22,\n\tabstract = {There is significant interest in deploying machine learning algorithms for\ndiagnostic radiology, as modern learning techniques have made it possible to\ndetect abnormalities in medical images within minutes. While machine-assisted\ndiagnoses cannot yet reliably replace human reviews of images by a radiologist,\nthey could inform prioritization rules for determining the order by which to\nreview patient cases so that patients with time-sensitive conditions could\nbenefit from early intervention.\n\n We study this scenario by formulating it as a learning-augmented online\nscheduling problem. We are given information about each arriving patient's\nurgency level in advance, but these predictions are inevitably error-prone. In\nthis formulation, we face the challenges of decision making under imperfect\ninformation, and of responding dynamically to prediction error as we observe\nbetter data in real-time. We propose a simple online policy and show that this\npolicy is in fact the best possible in certain stylized settings. We also\ndemonstrate that our policy achieves the two desiderata of online algorithms\nwith predictions: consistency (performance improvement with prediction\naccuracy) and robustness (protection against the worst case). We complement our\ntheoretical findings with empirical evaluations of the policy under settings\nthat more accurately reflect clinical scenarios in the real world.},\n\tauthor = {Woo-Hyung Cho and Shane G. Henderson and David B. Shmoys},\n\tdate-added = {2022-12-21 05:38:38 -0500},\n\tdate-modified = {2022-12-21 05:40:56 -0500},\n\tjournal = {Submitted},\n\ttitle = {Scheduling with predictions},\n\turl_paper = {http://arxiv.org/abs/2212.10433},\n\tyear = {2022}}\n\n
\n
\n\n\n
\n There is significant interest in deploying machine learning algorithms for diagnostic radiology, as modern learning techniques have made it possible to detect abnormalities in medical images within minutes. While machine-assisted diagnoses cannot yet reliably replace human reviews of images by a radiologist, they could inform prioritization rules for determining the order by which to review patient cases so that patients with time-sensitive conditions could benefit from early intervention. We study this scenario by formulating it as a learning-augmented online scheduling problem. We are given information about each arriving patient's urgency level in advance, but these predictions are inevitably error-prone. In this formulation, we face the challenges of decision making under imperfect information, and of responding dynamically to prediction error as we observe better data in real-time. We propose a simple online policy and show that this policy is in fact the best possible in certain stylized settings. We also demonstrate that our policy achieves the two desiderata of online algorithms with predictions: consistency (performance improvement with prediction accuracy) and robustness (protection against the worst case). We complement our theoretical findings with empirical evaluations of the policy under settings that more accurately reflect clinical scenarios in the real world.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Emulation of epidemics via Bluetooth-based virtual safe virus spread: Experimental setup, software, and data.\n \n \n \n \n\n\n \n Asanjarani, A.; Shausan, A.; Chew, K.; Graham, T.; Henderson, S. G.; Jansen, H. M.; Short, K. R.; Taylor, P. G.; Vuorinen, A.; Yadav, Y.; Ziedins, I.; and Nazarathy, Y.\n\n\n \n\n\n\n PLOS Digital Health, 1(12): 1-23. 12 2022.\n \n\n\n\n
\n\n\n\n \n \n \"EmulationPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{asaetal22,\n\tabstract = {We describe an experimental setup and a currently running experiment for evaluating how physical interactions over time and between individuals affect the spread of epidemics. Our experiment involves the voluntary use of the Safe Blues Android app by participants at The University of Auckland (UoA) City Campus in New Zealand. The app spreads multiple virtual safe virus strands via Bluetooth depending on the physical proximity of the subjects. The evolution of the virtual epidemics is recorded as they spread through the population. The data is presented as a real-time (and historical) dashboard. A simulation model is applied to calibrate strand parameters. Participants' locations are not recorded, but participants are rewarded based on the duration of participation within a geofenced area, and aggregate participation numbers serve as part of the data. The 2021 experimental data is available as an open-source anonymized dataset, and once the experiment is complete, the remaining data will be made available. This paper outlines the experimental setup, software, subject-recruitment practices, ethical considerations, and dataset description. The paper also highlights current experimental results in view of the lockdown that started in New Zealand at 23:59 on August 17, 2021. The experiment was initially planned in the New Zealand environment, expected to be free of COVID and lockdowns after 2020. However, a COVID Delta strain lockdown shuffled the cards and the experiment is currently extended into 2022.},\n\tauthor = {Asanjarani, Azam AND Shausan, Aminath AND Chew, Keng AND Graham, Thomas AND Henderson, Shane G. AND Jansen, Hermanus M. AND Short, Kirsty R. AND Taylor, Peter G. 
AND Vuorinen, Aapeli AND Yadav, Yuvraj AND Ziedins, Ilze AND Nazarathy, Yoni},\n\tdate-added = {2022-12-05 07:46:51 -0500},\n\tdate-modified = {2022-12-12 11:59:40 +0800},\n\tdoi = {10.1371/journal.pdig.0000142},\n\tjournal = {{PLOS} Digital Health},\n\tmonth = {12},\n\tnumber = {12},\n\tpages = {1-23},\n\tpublisher = {Public Library of Science},\n\ttitle = {Emulation of epidemics via Bluetooth-based virtual safe virus spread: Experimental setup, software, and data},\n\turl = {https://doi.org/10.1371/journal.pdig.0000142},\n\tvolume = {1},\n\tyear = {2022},\n\tbdsk-url-1 = {https://doi.org/10.1371/journal.pdig.0000142}}\n\n
\n
\n\n\n
\n We describe an experimental setup and a currently running experiment for evaluating how physical interactions over time and between individuals affect the spread of epidemics. Our experiment involves the voluntary use of the Safe Blues Android app by participants at The University of Auckland (UoA) City Campus in New Zealand. The app spreads multiple virtual safe virus strands via Bluetooth depending on the physical proximity of the subjects. The evolution of the virtual epidemics is recorded as they spread through the population. The data is presented as a real-time (and historical) dashboard. A simulation model is applied to calibrate strand parameters. Participants' locations are not recorded, but participants are rewarded based on the duration of participation within a geofenced area, and aggregate participation numbers serve as part of the data. The 2021 experimental data is available as an open-source anonymized dataset, and once the experiment is complete, the remaining data will be made available. This paper outlines the experimental setup, software, subject-recruitment practices, ethical considerations, and dataset description. The paper also highlights current experimental results in view of the lockdown that started in New Zealand at 23:59 on August 17, 2021. The experiment was initially planned in the New Zealand environment, expected to be free of COVID and lockdowns after 2020. However, a COVID Delta strain lockdown shuffled the cards and the experiment is currently extended into 2022.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Routine Surveillance and Vaccination on a University Campus During the Spread of the SARS-CoV-2 Omicron Variant.\n \n \n \n \n\n\n \n Meredith, G. R.; Diel, D. G.; Frazier, P. I.; Henderson, S. G.; Koretzky, G. A.; Wan, J.; and Warnick, L. D.\n\n\n \n\n\n\n JAMA Network Open, 5(5): e2212906-e2212906. 05 2022.\n \n\n\n\n
\n\n\n\n \n \n \"RoutinePaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{meretal22,\n\tauthor = {Meredith, Genevive R. and Diel, Diego G. and Frazier, Peter I. and Henderson, Shane G. and Koretzky, Gary A. and Wan, Jiayue and Warnick, Lorin D.},\n\tdate-added = {2022-05-20 11:21:02 -0400},\n\tdate-modified = {2022-05-20 11:21:02 -0400},\n\tdoi = {10.1001/jamanetworkopen.2022.12906},\n\teprint = {https://jamanetwork.com/journals/jamanetworkopen/articlepdf/2792382/meredith\\_2022\\_ld\\_220097\\_1651905040.62453.pdf},\n\tissn = {2574-3805},\n\tjournal = {JAMA Network Open},\n\tmonth = {05},\n\tnumber = {5},\n\tpages = {e2212906-e2212906},\n\ttitle = {{Routine Surveillance and Vaccination on a University Campus During the Spread of the SARS-CoV-2 Omicron Variant}},\n\turl = {https://doi.org/10.1001/jamanetworkopen.2022.12906},\n\tvolume = {5},\n\tyear = {2022},\n\tbdsk-url-1 = {https://doi.org/10.1001/jamanetworkopen.2022.12906}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Automatic differentiation for gradient estimators in simulation.\n \n \n \n\n\n \n Ford, M. T.; Eckman, D. J.; and Henderson, S. G.\n\n\n \n\n\n\n In Feng, B.; Pedrielli, G.; Peng, Y.; Shashaani, S.; Song, E.; Corlu, C.; Lee, L.; and Lendermann, P., editor(s), Proceedings of the 2022 Winter Simulation Conference. IEEE, 2022.\n Submitted\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{foreckhen22,\n\tabstract = {Automatic differentiation (AD) can provide infinitesimal perturbation analysis (IPA) derivative estimates\ndirectly from simulation code. These gradient estimators are simple to obtain analytically, at least in\nprinciple, but may be tedious to derive and implement in code. AD software tools aim to ease this workload\nby requiring little more than writing the simulation code. We review considerations when choosing an AD\ntool for simulation, demonstrate how to apply some specific AD tools to simulation, and provide insightful\nexperiments highlighting the effects of different choices to be made when applying AD in simulation.},\n\tauthor = {Matthew T. Ford and David J. Eckman and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2022 Winter Simulation Conference},\n\tdate-added = {2022-04-30 09:03:13 -0400},\n\tdate-modified = {2022-04-30 19:13:16 -0400},\n\teditor = {B. Feng and G. Pedrielli and Y. Peng and S. Shashaani and E. Song and C.G. Corlu and L.H. Lee and P. Lendermann},\n\tnote = {Submitted},\n\tpublisher = {IEEE},\n\ttitle = {Automatic differentiation for gradient estimators in simulation},\n\tyear = {2022}}\n\n
\n
\n\n\n
\n Automatic differentiation (AD) can provide infinitesimal perturbation analysis (IPA) derivative estimates directly from simulation code. These gradient estimators are simple to obtain analytically, at least in principle, but may be tedious to derive and implement in code. AD software tools aim to ease this workload by requiring little more than writing the simulation code. We review considerations when choosing an AD tool for simulation, demonstrate how to apply some specific AD tools to simulation, and provide insightful experiments highlighting the effects of different choices to be made when applying AD in simulation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Central Limit Theorem for Empirical Quantiles in the Markov Chain Setting.\n \n \n \n \n\n\n \n Glynn, P. W.; and Henderson, S. G.\n\n\n \n\n\n\n In Festschrift for Pierre L'Ecuyer. Springer, 2022.\n To appear\n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{glyhen22,\n\tabstract = {We provide a new proof of a central limit theorem for empirical\nquantiles in the positive-recurrent Markov process setting under\nconditions that are essentially tight. We also\nestablish the validity of the method of nonoverlapping batch means\nwith a fixed number of batches for interval estimation of the\nquantile. The conditions of these results are likely to be difficult\nto verify in practice, and so we also provide more easily verified\nsufficient conditions.},\n\tauthor = {Peter W. Glynn and Shane G. Henderson},\n\tbooktitle = {Festschrift for Pierre L'Ecuyer},\n\tdate-added = {2022-04-24 22:00:03 -0400},\n\tdate-modified = {2022-04-24 22:05:14 -0400},\n\tnote = {To appear},\n\tpublisher = {Springer},\n\ttitle = {A Central Limit Theorem for Empirical Quantiles in the Markov Chain Setting},\n\turl_paper = {pubs/glyhen22.pdf},\n\tyear = {2022}}\n\n
\n
\n\n\n
\n We provide a new proof of a central limit theorem for empirical quantiles in the positive-recurrent Markov process setting under conditions that are essentially tight. We also establish the validity of the method of nonoverlapping batch means with a fixed number of batches for interval estimation of the quantile. The conditions of these results are likely to be difficult to verify in practice, and so we also provide more easily verified sufficient conditions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Booster vaccination protection against SARS-CoV-2 infections in young adults during an Omicron BA.1-predominant period: a retrospective cohort study.\n \n \n \n\n\n \n Wan, J.; Cazer, C. L.; Clarkberg, M. E.; Henderson, S. G.; Lee, S. E.; Meredith, G.; Osman, M.; Shmoys, D. B.; and Frazier, P. I.\n\n\n \n\n\n\n PLOS Medicine. 2022.\n To appear\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{wanetal22,\n\tauthor = {Jiayue Wan and Casey L. Cazer and Marin E. Clarkberg and Shane G. Henderson and Scarlett E. Lee and Genevive Meredith and Marwan Osman and David B. Shmoys and Peter I. Frazier},\n\tdate-added = {2022-02-08 21:58:39 -0500},\n\tdate-modified = {2022-12-12 11:58:39 +0800},\n\tjournal = {{PLOS} Medicine},\n\tnote = {To appear},\n\ttitle = {Booster vaccination protection against {SARS-CoV}-2 infections in young adults during an {O}micron {BA}.1-predominant period: a retrospective cohort study},\n\tyear = {2022}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Modeling for COVID-19 college reopening decisions: Cornell, a case study.\n \n \n \n \n\n\n \n Frazier, P. I.; Cashore, J. M.; Duan, N.; Henderson, S. G.; Janmohamed, A.; Liu, B.; Shmoys, D. B.; Wan, J.; and Zhang, Y.\n\n\n \n\n\n\n Proceedings of the National Academy of Sciences, 119(2). 2022.\n \n\n\n\n
\n\n\n\n \n \n \"ModelingPaper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{fraetal21,\n\tabstract = {Significance: Decisions surrounding how to safely reopen universities directly impact 7\\% of the US population (students, staff) and indirectly impact tens of millions more (families, communities). After witnessing large COVID-19 outbreaks among students from August 2020 to the present, universities want to provide safety while minimizing social and financial costs, despite uncertainty about vaccine hesitancy, vaccine efficacy, more transmissible variants with the potential for immune escape, and community prevalence. When the Delta variant is dominant, we find substantial risk reduction in moving student populations from mostly (75\\%) to fully (100\\%) vaccinated, in testing vaccinated students once per week even when all students are vaccinated, and in more frequent testing targeted to the most social groups of students.\n\nAbstrract: We consider epidemiological modeling for the design of COVID-19 interventions in university populations, which have seen significant outbreaks during the pandemic. A central challenge is sensitivity of predictions to input parameters coupled with uncertainty about these parameters. Nearly 2 y into the pandemic, parameter uncertainty remains because of changes in vaccination efficacy, viral variants, and mask mandates, and because universities{\\textquoteright} unique characteristics hinder translation from the general population: a high fraction of young people, who have higher rates of asymptomatic infection and social contact, as well as an enhanced ability to implement behavioral and testing interventions. We describe an epidemiological model that formed the basis for Cornell University{\\textquoteright}s decision to reopen for in-person instruction in fall 2020 and supported the design of an asymptomatic screening program instituted concurrently to prevent viral spread. 
We demonstrate how the structure of these decisions allowed risk to be minimized despite parameter uncertainty leading to an inability to make accurate point estimates and how this generalizes to other university settings. We find that once-per-week asymptomatic screening of vaccinated undergraduate students provides substantial value against the Delta variant, even if all students are vaccinated, and that more targeted testing of the most social vaccinated students provides further value.Code and simulation results have been deposited in GitHub, https://github.com/peter-i-frazier/group-testing (57). Data collected from Cornell students and employees during surveillance testing is HIPAA-protected and not approved for public release. Aggregates from this data approved for release are reported in the main text and SI Appendix.},\n\tauthor = {Frazier, Peter I. and Cashore, J. Massey and Duan, Ning and Henderson, Shane G. and Janmohamed, Alyf and Liu, Brian and Shmoys, David B. and Wan, Jiayue and Zhang, Yujia},\n\tdate-added = {2022-01-03 17:05:40 -0500},\n\tdate-modified = {2022-05-20 10:19:52 -0400},\n\tdoi = {10.1073/pnas.2112532119},\n\telocation-id = {e2112532119},\n\teprint = {https://www.pnas.org/content/119/2/e2112532119.full.pdf},\n\tissn = {0027-8424},\n\tjournal = {Proceedings of the National Academy of Sciences},\n\tnumber = {2},\n\tpublisher = {National Academy of Sciences},\n\ttitle = {Modeling for COVID-19 college reopening decisions: Cornell, a case study},\n\turl = {https://doi.org/10.1073/pnas.2112532119},\n\tvolume = {119},\n\tyear = {2022},\n\tbdsk-url-1 = {https://www.pnas.org/content/119/2/e2112532119},\n\tbdsk-url-2 = {https://doi.org/10.1073/pnas.2112532119}}\n\n
\n
\n\n\n
\n Significance: Decisions surrounding how to safely reopen universities directly impact 7% of the US population (students, staff) and indirectly impact tens of millions more (families, communities). After witnessing large COVID-19 outbreaks among students from August 2020 to the present, universities want to provide safety while minimizing social and financial costs, despite uncertainty about vaccine hesitancy, vaccine efficacy, more transmissible variants with the potential for immune escape, and community prevalence. When the Delta variant is dominant, we find substantial risk reduction in moving student populations from mostly (75%) to fully (100%) vaccinated, in testing vaccinated students once per week even when all students are vaccinated, and in more frequent testing targeted to the most social groups of students. Abstract: We consider epidemiological modeling for the design of COVID-19 interventions in university populations, which have seen significant outbreaks during the pandemic. A central challenge is sensitivity of predictions to input parameters coupled with uncertainty about these parameters. Nearly 2 y into the pandemic, parameter uncertainty remains because of changes in vaccination efficacy, viral variants, and mask mandates, and because universities' unique characteristics hinder translation from the general population: a high fraction of young people, who have higher rates of asymptomatic infection and social contact, as well as an enhanced ability to implement behavioral and testing interventions. We describe an epidemiological model that formed the basis for Cornell University's decision to reopen for in-person instruction in fall 2020 and supported the design of an asymptomatic screening program instituted concurrently to prevent viral spread. 
We demonstrate how the structure of these decisions allowed risk to be minimized despite parameter uncertainty leading to an inability to make accurate point estimates and how this generalizes to other university settings. We find that once-per-week asymptomatic screening of vaccinated undergraduate students provides substantial value against the Delta variant, even if all students are vaccinated, and that more targeted testing of the most social vaccinated students provides further value.Code and simulation results have been deposited in GitHub, https://github.com/peter-i-frazier/group-testing (57). Data collected from Cornell students and employees during surveillance testing is HIPAA-protected and not approved for public release. Aggregates from this data approved for release are reported in the main text and SI Appendix.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Minimizing multimodular functions and allocating capacity in bike-sharing systems.\n \n \n \n \n\n\n \n Freund, D.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n Operations Research, 70(5): 2715-2731. 2022.\n \n\n\n\n
\n\n\n\n \n \n \"Minimizing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{frehenshm17b,\n\tauthor = {Daniel Freund and Shane G. Henderson and David B. Shmoys},\n\tdate-added = {2017-06-15 14:08:37 +0000},\n\tdate-modified = {2022-11-20 12:52:51 -0500},\n\tjournal = {Operations Research},\n\tnumber = {5},\n\tpages = {2715-2731},\n\ttitle = {Minimizing multimodular functions and allocating capacity in bike-sharing systems},\n\turl_paper = {pubs/BikesOR.pdf},\n\tvolume = {70},\n\tyear = {2022}}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2021\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Fixed-confidence, fixed-tolerance guarantees for selection-of-the-best procedures.\n \n \n \n \n\n\n \n Eckman, D. J.; and Henderson, S. G.\n\n\n \n\n\n\n ACM Transactions on Modeling and Computer Simulation, 31(1): Article 7, 1–33. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Fixed-confidence,Paper\n  \n \n \n \"Fixed-confidence, paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{eckhen18,\n\tabstract = {Ever since the conception of the statistical ranking-and-selection (R\\&S) problem, a predominant approach has been the indifference-zone (IZ) formulation.\nUnder the IZ formulation, R\\&S procedures are designed to provide a guarantee on the probability of correct selection (PCS) whenever the performance of the best system exceeds that of the second-best system by a specified amount.\nWe discuss the shortcomings of this guarantee and argue that providing a guarantee on the probability of good selection (PGS)---selecting a system whose performance is within a specified tolerance of the best---is a more justifiable goal.\nUnfortunately, this form of fixed-confidence, fixed-tolerance guarantee has received far less attention within the simulation community.\nWe present an overview of the PGS guarantee with the aim of reorienting the simulation community towards this goal.\nWe examine numerous techniques for proving the PGS guarantee, including sufficient conditions under which selection and subset-selection procedures that deliver the IZ-inspired PCS guarantee also deliver the PGS guarantee.},\n\tauthor = {David J. Eckman and Shane G. Henderson},\n\tdate-added = {2021-09-16 08:52:56 -0400},\n\tdate-modified = {2022-05-20 10:33:39 -0400},\n\tjournal = {{ACM} Transactions on Modeling and Computer Simulation},\n\tnumber = {1},\n\tpages = {Article 7, 1--33},\n\ttitle = {Fixed-confidence, fixed-tolerance guarantees for selection-of-the-best procedures},\n\turl = {https://doi.org/10.1145/3432754},\n\turl_paper = {pubs/PGS_for_RS-TOMACS.pdf},\n\tvolume = {31},\n\tyear = {2021},\n\tbdsk-url-1 = {https://doi.org/10.1145/3432754}}\n\n
\n
\n\n\n
\n Ever since the conception of the statistical ranking-and-selection (R&S) problem, a predominant approach has been the indifference-zone (IZ) formulation. Under the IZ formulation, R&S procedures are designed to provide a guarantee on the probability of correct selection (PCS) whenever the performance of the best system exceeds that of the second-best system by a specified amount. We discuss the shortcomings of this guarantee and argue that providing a guarantee on the probability of good selection (PGS)—selecting a system whose performance is within a specified tolerance of the best—is a more justifiable goal. Unfortunately, this form of fixed-confidence, fixed-tolerance guarantee has received far less attention within the simulation community. We present an overview of the PGS guarantee with the aim of reorienting the simulation community towards this goal. We examine numerous techniques for proving the PGS guarantee, including sufficient conditions under which selection and subset-selection procedures that deliver the IZ-inspired PCS guarantee also deliver the PGS guarantee.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reflections on Simulation Optimization.\n \n \n \n \n\n\n \n Henderson, S. G.\n\n\n \n\n\n\n In Kim, S.; Feng, B.; Smith, K.; Masoud, S.; Zheng, Z.; Szabo, C.; and Loper, M., editor(s), Proceedings of the 2021 Winter Simulation Conference, Piscataway NJ, 2021. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"ReflectionsPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hen21,\n\tabstract = {I provide some perspectives on simulation\noptimization. First, more attention should be devoted to the\nfinite-time performance of solvers than on ensuring convergence\nproperties that may only arise in asymptotic time scales that may\nnever be reached in practice. Both analytical results and computational\nexperiments can further this goal. Second, so-called sample-path\nfunctions can exhibit extremely complex behavior that is well worth\nunderstanding in selecting a solver and its parameters. Third, I\nadvocate the use of a layered approach to formulating and solving\noptimization problems, whereby a sequence of models are built and\noptimized, rather than first building a simulation model and only\nlater ``bolting on'' optimization.\n},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2021 Winter Simulation Conference},\n\tdate-added = {2021-04-21 17:21:39 -0400},\n\tdate-modified = {2022-05-20 13:02:56 -0400},\n\teditor = {S. Kim and B. Feng and K. Smith and S. Masoud and Z. Zheng and C. Szabo and M. Loper},\n\torganization = {IEEE},\n\ttitle = {Reflections on Simulation Optimization},\n\turl = {https://www.informs-sim.org/wsc21papers/001.pdf},\n\tyear = {2021},\n\tbdsk-url-1 = {https://www.informs-sim.org/wsc21papers/001.pdf}}\n\n
\n
\n\n\n
\n I provide some perspectives on simulation optimization. First, more attention should be devoted to the finite-time performance of solvers than on ensuring convergence properties that may only arise in asymptotic time scales that may never be reached in practice. Both analytical results and computational experiments can further this goal. Second, so-called sample-path functions can exhibit extremely complex behavior that is well worth understanding in selecting a solver and its parameters. Third, I advocate the use of a layered approach to formulating and solving optimization problems, whereby a sequence of models are built and optimized, rather than first building a simulation model and only later ``bolting on'' optimization. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Safe Blues: the case for virtual safe virus spread in the long-term fight against epidemics.\n \n \n \n \n\n\n \n Dandekar, R.; Henderson, S. G.; Jansen, H. M.; McDonald, J.; Moka, S.; Nazarathy, Y.; Rackauckas, C.; Stace, T. M.; Taylor, P. G.; and Vuorinen, A.\n\n\n \n\n\n\n Patterns, 2: 1–9. 2021.\n \n\n\n\n
\n\n\n\n \n \n \"Safe paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 7 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{danetal20,\n\tauthor = {Raj Dandekar and Shane G. Henderson and Hermanus M. Jansen and Joshua McDonald and Sarat Moka and Yoni Nazarathy and Christopher Rackauckas and Thomas M. Stace and Peter G. Taylor and Aapeli Vuorinen},\n\tdate-added = {2020-09-19 11:51:07 -0400},\n\tdate-modified = {2021-03-19 15:25:08 -0400},\n\tdoi = {https://doi.org/10.1016/j.patter.2021.100220},\n\tjournal = {Patterns},\n\tpages = {1--9},\n\ttitle = {Safe Blues: the case for virtual safe virus spread in the long-term fight against epidemics},\n\turl_paper = {pubs/SafeBluesPatterns.pdf},\n\tvolume = {2},\n\tyear = {2021},\n\tbdsk-url-1 = {https://doi.org/10.1016/j.patter.2021.100220}}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2020\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Biased gradient estimators in simulation optimization.\n \n \n \n \n\n\n \n Eckman, D. J.; and Henderson, S. G.\n\n\n \n\n\n\n In Bae, K.; Feng, B.; Kim, S.; Lazarova-Molnar, S.; Zheng, Z.; Roeder, T.; and Thiesing, R., editor(s), Proceedings of the 2020 Winter Simulation Conference, pages 2935–2946, Piscataway NJ, 2020. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Biased paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{eckhen20,\n\tabstract = {Within the simulation community, the prevailing wisdom seems to be that when solving a simulation optimization problem, biased gradient estimators should not be used to guide a local-search algorithm.\nOn the contrary, we argue that for certain problems, biased gradient estimators may still provide useful directional information.\nWe focus on the infinitesimal perturbation analysis (IPA) gradient estimator, which is  biased when an interchange of differentiation and expectation fails.\nAlthough a local-search algorithm guided by biased gradient estimators will likely not converge to a local optimal solution, it might be expected to reach a neighborhood of one. We test such a gradient-based search on an ambulance base location problem, demonstrating its effectiveness in a non-trivial example, and present some supporting theoretical results.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {David J. Eckman and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2020 Winter Simulation Conference},\n\tdate-added = {2020-04-23 09:35:21 -0400},\n\tdate-modified = {2021-07-08 16:51:27 -0400},\n\teditor = {K.-H. Bae and B. Feng and S. Kim and S. {Lazarova-Molnar} and Z. Zheng and T. Roeder and R. Thiesing},\n\torganization = {IEEE},\n\tpages = {2935--2946},\n\ttitle = {Biased gradient estimators in simulation optimization},\n\turl_paper = {https://informs-sim.org/wsc20papers/299.pdf},\n\tyear = {2020}}\n\n
\n
\n\n\n
\n Within the simulation community, the prevailing wisdom seems to be that when solving a simulation optimization problem, biased gradient estimators should not be used to guide a local-search algorithm. On the contrary, we argue that for certain problems, biased gradient estimators may still provide useful directional information. We focus on the infinitesimal perturbation analysis (IPA) gradient estimator, which is biased when an interchange of differentiation and expectation fails. Although a local-search algorithm guided by biased gradient estimators will likely not converge to a local optimal solution, it might be expected to reach a neighborhood of one. We test such a gradient-based search on an ambulance base location problem, demonstrating its effectiveness in a non-trivial example, and present some supporting theoretical results.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Pump allocation at a New Zealand oil refinery.\n \n \n \n \n\n\n \n Henderson, S. G.; Gill, G.; and Patel, P.\n\n\n \n\n\n\n In Murty, K., editor(s), Models for Optimum Decision Making in Crude Oil Production and Refining. Springer, 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Pump paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{henpat00,\n\tabstract = {We consider a non-traditional assignment problem that arose at a New\nZealand oil refinery. A mixed integer program is developed to\ndetermine an assignment of pumps to tanks that minimizes the time\nrequired to perform a blending operation. The formulation involves a\nnon-traditional transformation of the objective function which is\ninteresting in its own right. \n\nAn extension, in which we show how to reduce the pumping time by using\nmore expensive component materials used in the blend, is provided here.},\n\tauthor = {S. G. Henderson and G. Gill and P. Patel},\n\tbooktitle = {Models for Optimum Decision Making in Crude Oil Production and Refining},\n\tdate-added = {2019-11-16 14:16:04 -0800},\n\tdate-modified = {2019-11-16 14:16:04 -0800},\n\teditor = {K. Murty},\n\tpublisher = {Springer},\n\ttitle = {Pump allocation at a {N}ew {Z}ealand oil refinery},\n\turl_paper = {pubs/Pumps-extended.pdf},\n\tyear = {2020}}\n\n
\n
\n\n\n
\n We consider a non-traditional assignment problem that arose at a New Zealand oil refinery. A mixed integer program is developed to determine an assignment of pumps to tanks that minimizes the time required to perform a blending operation. The formulation involves a non-traditional transformation of the objective function which is interesting in its own right. An extension, in which we show how to reduce the pumping time by using more expensive component materials used in the blend, is provided here.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimating the probability that a function observed with noise is convex.\n \n \n \n \n\n\n \n Jian, N.; and Henderson, S. G.\n\n\n \n\n\n\n INFORMS Journal on Computing, 32(2): 376–389. 2020.\n \n\n\n\n
\n\n\n\n \n \n \"Estimating arxiv\n  \n \n \n \"Estimating paper\n  \n \n \n \"Estimating supplement\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{jiahen17,\n\tauthor = {Nanjing Jian and Shane G. Henderson},\n\tdate-added = {2017-02-24 18:22:32 +0000},\n\tdate-modified = {2022-04-30 19:10:51 -0400},\n\tjournal = {{INFORMS} Journal on Computing},\n\tnumber = {2},\n\tpages = {376--389},\n\ttitle = {Estimating the probability that a function observed with noise is convex},\n\turl_arxiv = {http://arxiv.org/abs/1703.04185},\n\turl_paper = {pubs/JianHenIJOC18.pdf},\n\turl_supplement = {pubs/JianHenIJOC18Supplement.pdf},\n\tvolume = {32},\n\tyear = {2020}}\n\n
\n
\n\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2019\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Redesigning a testbed of simulation-optimization problems and solvers for experimental comparisons.\n \n \n \n\n\n \n Eckman, D. J.; Henderson, S. G.; and Pasupathy, R.\n\n\n \n\n\n\n In Mustafee, N.; Bae, K.; Lazarova-Molnar, S.; Rabe, M.; Szabo, C.; Haas, P.; and Son, Y., editor(s), Proceedings of the 2019 Winter Simulation Conference, pages 3457–3467, Piscataway NJ, 2019. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{eckhenpas19,\n\tabstract = {This tutorial provides an overview of guarantees on the probability of good selection (PGS) for ranking-and-selection procedures, i.e., statistical guarantees on selecting---with high probability---an alternative whose expected performance is within a given tolerance of the best. We discuss why PGS guarantees are superior to more popular, related guarantees on the probability of correct selection (PCS) under the indifference-zone formulation. We review existing procedures that deliver PGS guarantees and assess several direct and indirect methods of proof. We compare the frequentist and Bayesian interpretations of PGS and highlight the differences in how procedures with PGS guarantees are designed under the two frameworks.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {David J. Eckman and Shane G. Henderson and Raghu Pasupathy},\n\tbooktitle = {Proceedings of the 2019 Winter Simulation Conference},\n\tdate-added = {2020-02-24 14:04:05 +1300},\n\tdate-modified = {2020-02-24 14:05:37 +1300},\n\teditor = {N. Mustafee and K.-H.G. Bae and S. Lazarova-Molnar and M. Rabe and C. Szabo and P. Haas and Y.-J. Son},\n\torganization = {IEEE},\n\tpages = {3457--3467},\n\ttitle = {Redesigning a testbed of simulation-optimization problems and solvers for experimental comparisons},\n\tyear = {2019}}\n\n
\n
\n\n\n
\n This tutorial provides an overview of guarantees on the probability of good selection (PGS) for ranking-and-selection procedures, i.e., statistical guarantees on selecting—with high probability—an alternative whose expected performance is within a given tolerance of the best. We discuss why PGS guarantees are superior to more popular, related guarantees on the probability of correct selection (PCS) under the indifference-zone formulation. We review existing procedures that deliver PGS guarantees and assess several direct and indirect methods of proof. We compare the frequentist and Bayesian interpretations of PGS and highlight the differences in how procedures with PGS guarantees are designed under the two frameworks.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Bike Sharing.\n \n \n \n\n\n \n Freund, D.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n In Hu, M., editor(s), Sharing Economy: Making Supply Meet Demand, volume 6, of Springer Series in Supply Chain Management, pages 435 – 459. Springer, 2019.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{frehenshm17c,\n\tabstract = {We discuss planning methods for bike-sharing systems that operate a set of stations consisting of docks. Specific questions include decisions related to the number of docks to allocate to each station, how to rebalance the system by moving bikes to match demand, and expansion planning. We describe linear integer programming models, specially tailored optimization algorithms, and simulation methods. All of these methods rely on careful statistical analysis of bike-sharing data, which we also briefly review. Our discussion of the issues is informed by our 4-year collaboration with Citi Bike in New York City, and its parent company Motivate.},\n\tauthor = {Daniel Freund and Shane G. Henderson and David B. Shmoys},\n\tbooktitle = {Sharing Economy: Making Supply Meet Demand},\n\tdate-added = {2019-01-23 12:01:37 -0500},\n\tdate-modified = {2019-01-23 12:01:37 -0500},\n\teditor = {Ming Hu},\n\tpages = {435 -- 459},\n\tpublisher = {Springer},\n\tseries = {Springer Series in Supply Chain Management},\n\ttitle = {Bike Sharing},\n\tvolume = {6},\n\tyear = {2019}}\n\n
\n
\n\n\n
\n We discuss planning methods for bike-sharing systems that operate a set of stations consisting of docks. Specific questions include decisions related to the number of docks to allocate to each station, how to rebalance the system by moving bikes to match demand, and expansion planning. We describe linear integer programming models, specially tailored optimization algorithms, and simulation methods. All of these methods rely on careful statistical analysis of bike-sharing data, which we also briefly review. Our discussion of the issues is informed by our 4-year collaboration with Citi Bike in New York City, and its parent company Motivate.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Analytics and bikes: riding tandem with Motivate to improve mobility.\n \n \n \n \n\n\n \n Freund, D.; Henderson, S. G.; O'Mahony, E.; and Shmoys, D. B.\n\n\n \n\n\n\n INFORMS Journal on Applied Analytics, 49(5): 310–323. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Analytics paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{frehenomashm19,\n\tabstract = {Bike-sharing systems are now ubiquitous across the United States. We have worked with Motivate, the operator of the systems in, for example, New York, Chicago, and San Francisco, to innovate a data-driven approach to managing both their day-to-day operations and to provide insight on several central issues in the design of its systems. This work required the development of a number of new optimization models, characterizing their mathematical structure, and using this insight in designing algorithms to solve them. Here, we focus on two particularly high impact projects, an initiative to improve the allocation of docks to stations, and the creation of an incentive scheme to crowdsource rebalancing. Both of these projects have been fully implemented to improve the performance of Motivate's systems across the country; for example, the Bike Angels program in New York City yields a system-wide improvement comparable to that obtained through Motivate's traditional rebalancing efforts, at far less financial and environmental cost.},\n\tauthor = {Daniel Freund and Shane G. Henderson and Eoin O'{M}ahony and David B. Shmoys},\n\tdate-added = {2018-11-07 19:52:28 +0000},\n\tdate-modified = {2020-04-29 14:09:16 -0400},\n\tjournal = {{INFORMS} Journal on Applied Analytics},\n\tnumber = {5},\n\tpages = {310--323},\n\ttitle = {Analytics and bikes: riding tandem with {M}otivate to improve mobility},\n\turl_paper = {pubs/RidingTandemWithMotivate.pdf},\n\tvolume = {49},\n\tyear = {2019}}\n\n
\n
\n\n\n
\n Bike-sharing systems are now ubiquitous across the United States. We have worked with Motivate, the operator of the systems in, for example, New York, Chicago, and San Francisco, to innovate a data-driven approach to managing both their day-to-day operations and to provide insight on several central issues in the design of its systems. This work required the development of a number of new optimization models, characterizing their mathematical structure, and using this insight in designing algorithms to solve them. Here, we focus on two particularly high impact projects, an initiative to improve the allocation of docks to stations, and the creation of an incentive scheme to crowdsource rebalancing. Both of these projects have been fully implemented to improve the performance of Motivate's systems across the country; for example, the Bike Angels program in New York City yields a system-wide improvement comparable to that obtained through Motivate's traditional rebalancing efforts, at far less financial and environmental cost.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Data-Driven Rebalancing Methods for Bike-Share Systems.\n \n \n \n \n\n\n \n Freund, D.; Norouzi-Fard, A.; Paul, A. J.; Wang, C.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n In Ghaddar, B.; Naoum-Sawaya, J.; Hausler, F.; Russo, G.; and Shorten, R., editor(s), Analytics for the sharing economy: Mathematics, Engineering and Business perspectives. Springer, 2019.\n To appear\n\n\n\n
\n\n\n\n \n \n \"Data-Driven paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 8 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{freetal19,\n\tabstract = {As bike-share systems expand in urban areas, the wealth of publicly available data has drawn researchers to address the ``novel operational challenges'' these systems face. One key challenge is to meet user demand for available bikes and docks by rebalancing the system. This chapter reports on a collaborative effort with Citi Bike to develop and implement real data-driven optimization to guide their rebalancing efforts. In particular, we provide new models to guide truck routing for overnight rebalancing and new optimization problems for other non-motorized rebalancing efforts during the day. Finally, we evaluate how our practical methods have impacted rebalancing in New York City.},\n\tauthor = {Daniel Freund and Ashkan Norouzi-Fard and Alice J. Paul and Carter Wang and Shane G. Henderson and David B. Shmoys},\n\tbooktitle = {Analytics for the sharing economy: Mathematics, Engineering and Business perspectives},\n\tdate-added = {2018-01-11 15:51:05 +0000},\n\tdate-modified = {2020-02-24 13:59:14 +1300},\n\teditor = {B. Ghaddar and J. Naoum-Sawaya and F. Hausler and G. Russo and R. Shorten},\n\tnote = {To appear},\n\tpublisher = {Springer},\n\ttitle = {Data-Driven Rebalancing Methods for Bike-Share Systems},\n\turl_paper = {pubs/BSOvernight.pdf},\n\tyear = {2019}}\n\n
\n
\n\n\n
\n As bike-share systems expand in urban areas, the wealth of publicly available data has drawn researchers to address the ``novel operational challenges'' these systems face. One key challenge is to meet user demand for available bikes and docks by rebalancing the system. This chapter reports on a collaborative effort with Citi Bike to develop and implement real data-driven optimization to guide their rebalancing efforts. In particular, we provide new models to guide truck routing for overnight rebalancing and new optimization problems for other non-motorized rebalancing efforts during the day. Finally, we evaluate how our practical methods have impacted rebalancing in New York City.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Predicting the simulation budget in ranking and selection procedures.\n \n \n \n \n\n\n \n Ma, S.; and Henderson, S. G.\n\n\n \n\n\n\n ACM Transactions on Modeling and Computer Simulation, 29(3): Article 14, 1–25. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Predicting code\n  \n \n \n \"Predicting paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{mahen17b,\n\tabstract = {The goal of ranking and selection (R\\&S) procedures is to identify the best among a finite set of alternative systems evaluated by stochastic\nsimulation, providing a probability guarantee on the quality of the\nsolution. In order to solve large-scale R\\&S problems, especially in\nparallel computing platforms where variable numbers of cores might be\nused, it is helpful to be able to predict the simulation\n  budget, which is almost always the dominant portion of the running time of a given procedure for a given\nproblem. Non-trivial issues arise due to the need to estimate the\nsystem configuration. We propose a set of methods for predicting the\nsimulation budget. Numerical results compare our predictions for several\nleading R\\&S procedures.\n},\n\tauthor = {Sijia Ma and Shane G. Henderson},\n\tdate-added = {2018-01-11 15:33:39 +0000},\n\tdate-modified = {2020-02-24 13:06:41 +1300},\n\tjournal = {{ACM} Transactions on Modeling and Computer Simulation},\n\tnumber = {3},\n\tpages = {Article 14, 1--25},\n\ttitle = {Predicting the simulation budget in ranking and selection procedures},\n\turl_code = {https://github.com/sjasonma/RnSRunningTime},\n\turl_paper = {pubs/RandSRuntime.pdf},\n\tvolume = {29},\n\tyear = {2019}}\n\n
\n
\n\n\n
\n The goal of ranking and selection (R&S) procedures is to identify the best among a finite set of alternative systems evaluated by stochastic simulation, providing a probability guarantee on the quality of the solution. In order to solve large-scale R&S problems, especially in parallel computing platforms where variable numbers of cores might be used, it is helpful to be able to predict the simulation budget, which is almost always the dominant portion of the running time of a given procedure for a given problem. Non-trivial issues arise due to the need to estimate the system configuration. We propose a set of methods for predicting the simulation budget. Numerical results compare our predictions for several leading R&S procedures. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Probabilistic bisection converges almost as quickly as stochastic approximation.\n \n \n \n \n\n\n \n Frazier, P. I.; Henderson, S. G.; and Waeber, R.\n\n\n \n\n\n\n Mathematics of Operations Research, 44(2): 651–667. 2019.\n \n\n\n\n
\n\n\n\n \n \n \"Probabilistic arxiv\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{frahenwae16,\n\tabstract = {The probabilistic bisection algorithm (PBA) solves a class of\nstochastic root-finding problems in one dimension by successively updating a\nprior belief on the location of the root based on noisy responses to \nqueries at chosen points. The responses indicate\nthe direction of the root from the queried point, and are incorrect\nwith a fixed probability. The fixed-probability assumption is\nproblematic in applications, and so we extend the PBA to apply when\nthis assumption is relaxed. The extension involves the use of a\npower-one test at each queried\npoint. We explore the convergence behavior of the extended PBA, showing that it\nconverges at a rate arbitrarily close to, but slower than, the canonical ``square root''\nrate of stochastic approximation.},\n\tauthor = {P. I. Frazier and S. G. Henderson and R. Waeber},\n\tdate-added = {2016-08-26 18:10:53 +0000},\n\tdate-modified = {2020-02-24 13:10:58 +1300},\n\tjournal = {Mathematics of Operations Research},\n\tnumber = {2},\n\tpages = {651--667},\n\ttitle = {Probabilistic bisection converges almost as quickly as stochastic approximation},\n\turl_arxiv = {http://arxiv.org/abs/1612.03964},\n\tvolume = {44},\n\tyear = {2019}}\n\n
\n
\n\n\n
\n The probabilistic bisection algorithm (PBA) solves a class of stochastic root-finding problems in one dimension by successively updating a prior belief on the location of the root based on noisy responses to queries at chosen points. The responses indicate the direction of the root from the queried point, and are incorrect with a fixed probability. The fixed-probability assumption is problematic in applications, and so we extend the PBA to apply when this assumption is relaxed. The extension involves the use of a power-one test at each queried point. We explore the convergence behavior of the extended PBA, showing that it converges at a rate arbitrarily close to, but slower than, the canonical ``square root'' rate of stochastic approximation.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2018\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Minimizing multimodular functions and allocating capacity in bike-sharing systems.\n \n \n \n \n\n\n \n Freund, D.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n Production and Operations Management, 27(12): 2346-2349. 2018.\n Extended abstract\n\n\n\n
\n\n\n\n \n \n \"Minimizing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{frehenshm18,\n\tauthor = {Daniel Freund and Shane G. Henderson and David B. Shmoys},\n\tdate-added = {2019-01-03 10:18:34 -0500},\n\tdate-modified = {2019-01-03 10:18:34 -0500},\n\tjournal = {Production and Operations Management},\n\tnote = {Extended abstract},\n\tnumber = {12},\n\tpages = {2346-2349},\n\ttitle = {Minimizing multimodular functions and allocating capacity in bike-sharing systems},\n\turl_paper = {pubs/POMSMinimizingMultimodular.pdf},\n\tvolume = {27},\n\tyear = {2018}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A sequential selection procedure delivering a probably-approximately-correct selection using confidence bands.\n \n \n \n \n\n\n \n Ma, S.; and Henderson, S. G.\n\n\n \n\n\n\n 2018.\n Submitted\n\n\n\n
\n\n\n\n \n \n \"A code\n  \n \n \n \"A parallelcode\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{mahen18,\n\tabstract = {Ranking and Selection (R&S) procedures are commonly used for choosing the best among a finite set of candidate systems, each of which is associated with a simulation model. We present a new fully sequential selection procedure, the Envelope Procedure (EP), that provides a probably-approximately-correct guarantee on the selection. The EP has a rigorous complexity analysis for a particular choice of sampling strategy. Numerical experiments show that EP is more efficient than some leading R&S procedures in many problem configurations.},\n\tauthor = {Sijia Ma and Shane G. Henderson},\n\tdate-added = {2018-10-01 22:58:58 +0000},\n\tdate-modified = {2018-10-01 23:00:17 +0000},\n\tnote = {Submitted},\n\ttitle = {A sequential selection procedure delivering a probably-approximately-correct selection using confidence bands},\n\turl_code = {https://github.com/sjasonma/EnvelopeProcedure},\n\turl_parallelcode = {https://github.com/sjasonma/ParallelEP},\n\tyear = {2018}}\n\n
\n
\n\n\n
\n Ranking and Selection (R&S) procedures are commonly used for choosing the best among a finite set of candidate systems, each of which is associated with a simulation model. We present a new fully sequential selection procedure, the Envelope Procedure (EP), that provides a probably-approximately-correct guarantee on the selection. The EP has a rigorous complexity analysis for a particular choice of sampling strategy. Numerical experiments show that EP is more efficient than some leading R&S procedures in many problem configurations.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Reusing search data in ranking and selection: What could possibly go wrong?.\n \n \n \n \n\n\n \n Eckman, D. J.; and Henderson, S. G.\n\n\n \n\n\n\n ACM Transactions on Modeling and Computer Simulation, 28(3): Article 18. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"Reusing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{eckhen16,\n\tabstract = {It is tempting to reuse replications taken during a simulation optimization search as input to a ranking-and-selection procedure. However, even when the random inputs used to generate replications are i.i.d. and independent across systems, we show that for searches that use the observed performance of explored systems to identify new systems, the replications are conditionally dependent given the sequence of returned systems. Through simulation experiments, we demonstrate that reusing the replications taken during search in selection and subset-selection procedures can result in probabilities of correct and good selection well below the guaranteed levels. Based on these negative findings, we call into question the guarantees of established ranking-and-selection procedures that reuse search data. We also rigorously define guarantees for ranking-and-selection procedures after search and discuss how procedures that only provide guarantees in the preference zone are ill-suited to this setting.\n},\n\tauthor = {David J. Eckman and Shane G. Henderson},\n\tdate-added = {2018-07-11 19:06:38 +0000},\n\tdate-modified = {2018-07-11 19:06:38 +0000},\n\tjournal = {{ACM} Transactions on Modeling and Computer Simulation},\n\tnumber = {3},\n\tpages = {Article 18},\n\ttitle = {Reusing search data in ranking and selection: What could possibly go wrong?},\n\turl_paper = {pubs/Eckman18Reusing.pdf},\n\tvolume = {28},\n\tyear = {2018}}\n\n
\n
\n\n\n
\n It is tempting to reuse replications taken during a simulation optimization search as input to a ranking-and-selection procedure. However, even when the random inputs used to generate replications are i.i.d. and independent across systems, we show that for searches that use the observed performance of explored systems to identify new systems, the replications are conditionally dependent given the sequence of returned systems. Through simulation experiments, we demonstrate that reusing the replications taken during search in selection and subset-selection procedures can result in probabilities of correct and good selection well below the guaranteed levels. Based on these negative findings, we call into question the guarantees of established ranking-and-selection procedures that reuse search data. We also rigorously define guarantees for ranking-and-selection procedures after search and discuss how procedures that only provide guarantees in the preference zone are ill-suited to this setting. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Probability of good selection guarantees.\n \n \n \n\n\n \n Eckman, D. J.; and Henderson, S. G.\n\n\n \n\n\n\n In Rabe, M.; Juan, A. A.; Mustafee, N.; Skoogh, A.; Jain, S.; and Johansson, B., editor(s), Proceedings of the 2018 Winter Simulation Conference, pages 351–365, Piscataway NJ, 2018. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{eckhen18b,\n\tabstract = {This tutorial provides an overview of guarantees on the probability of good selection (PGS) for ranking-and-selection procedures, i.e., statistical guarantees on selecting---with high probability---an alternative whose expected performance is within a given tolerance of the best. We discuss why PGS guarantees are superior to more popular, related guarantees on the probability of correct selection (PCS) under the indifference-zone formulation. We review existing procedures that deliver PGS guarantees and assess several direct and indirect methods of proof. We compare the frequentist and Bayesian interpretations of PGS and highlight the differences in how procedures with PGS guarantees are designed under the two frameworks.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {David J. Eckman and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2018 Winter Simulation Conference},\n\tdate-added = {2018-05-04 15:34:21 +0000},\n\tdate-modified = {2019-01-03 10:30:41 -0500},\n\teditor = {M. Rabe and A. A. Juan and N. Mustafee and A. Skoogh and S. Jain and B. Johansson},\n\torganization = {IEEE},\n\tpages = {351--365},\n\ttitle = {Probability of good selection guarantees},\n\tyear = {2018}}\n\n
\n
\n\n\n
\n This tutorial provides an overview of guarantees on the probability of good selection (PGS) for ranking-and-selection procedures, i.e., statistical guarantees on selecting—with high probability—an alternative whose expected performance is within a given tolerance of the best. We discuss why PGS guarantees are superior to more popular, related guarantees on the probability of correct selection (PCS) under the indifference-zone formulation. We review existing procedures that deliver PGS guarantees and assess several direct and indirect methods of proof. We compare the frequentist and Bayesian interpretations of PGS and highlight the differences in how procedures with PGS guarantees are designed under the two frameworks.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Two-Class Routing with Admission Control and Strict Priorities.\n \n \n \n \n\n\n \n Chong, K. C.; Henderson, S. G.; and Lewis, M. E.\n\n\n \n\n\n\n Probability in the Engineering and Informational Sciences, 32(2): 163-178. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"Two-Class paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{chohenlew16,\n\tabstract = {We consider the problem of routing and admission control in a service system featuring two classes of arriving jobs (high-priority and low-priority jobs) and two types of servers, in which decision-making for high-priority jobs is forced, and rewards influence the desirability of each of the four possible routing decisions. We seek a policy that maximizes expected long-run reward under both the discounted reward and long-run average reward criteria, and formulate the problem as a Markov decision process. When the reward structure favors high-priority jobs, we demonstrate that there exists an optimal monotone switching curve policy with slope of at least −1. When the reward structure favors low-priority jobs, we demonstrate that the value function, in general, is fairly unstructured, which complicates the search for structure in optimal policies. However, we identify conditions under which optimal policies can be characterized in greater detail. We also examine the performance of heuristic policies in a brief numerical study.},\n\tauthor = {Kenneth C. Chong and Shane G. Henderson and Mark E. Lewis},\n\tdate-added = {2016-11-30 23:08:00 +0000},\n\tdate-modified = {2018-10-01 22:35:15 +0000},\n\tjournal = {Probability in the Engineering and Informational Sciences},\n\tnumber = {2},\n\tpages = {163-178},\n\ttitle = {Two-Class Routing with Admission Control and Strict Priorities},\n\turl_paper = {pubs/chohenlew17.pdf},\n\tvolume = {32},\n\tyear = {2018}}\n\n
\n
\n\n\n
\n We consider the problem of routing and admission control in a service system featuring two classes of arriving jobs (high-priority and low-priority jobs) and two types of servers, in which decision-making for high-priority jobs is forced, and rewards influence the desirability of each of the four possible routing decisions. We seek a policy that maximizes expected long-run reward under both the discounted reward and long-run average reward criteria, and formulate the problem as a Markov decision process. When the reward structure favors high-priority jobs, we demonstrate that there exists an optimal monotone switching curve policy with slope of at least −1. When the reward structure favors low-priority jobs, we demonstrate that the value function, in general, is fairly unstructured, which complicates the search for structure in optimal policies. However, we identify conditions under which optimal policies can be characterized in greater detail. We also examine the performance of heuristic policies in a brief numerical study.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Aggregating courier deliveries.\n \n \n \n \n\n\n \n Steele, P.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n Naval Research Logistics, 65(3): 187-202. 2018.\n \n\n\n\n
\n\n\n\n \n \n \"Aggregating paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{stehenshm16,\n\tabstract = {  We consider the problem of efficiently scheduling deliveries by an\n  uncapacitated courier from a central location under online\n  arrivals. We consider both adversary-controlled and Poisson arrival\n  processes. In the adversarial setting we provide a randomized\n  $\\paren{3 \\beta \\Delta / 2 \\delta - 1 }$-competitive algorithm,\n  where $\\beta$ is the approximation ratio of the Traveling Salesman\n  Problem, $\\delta$ is the minimum distance between the central\n  location and any customer, and $\\Delta$ is the length of the optimal\n  Traveling Salesman tour over all customer locations and the central\n  location. We provide instances showing that this analysis is\n  tight. We also prove a $1 + 0.271 \\Delta / \\delta$ lower-bound on\n  the competitive ratio of any algorithm in this setting. In the\n  Poisson setting, we relax our assumption of deterministic travel\n  times by assuming that travel times are distributed with a mean\n  equal to the excursion length. We prove that optimal policies in\n  this setting follow a threshold structure and describe this\n  structure. For the half-line metric space we bound the performance\n  of the randomized algorithm in the Poisson setting, and show through\n  numerical experiments that the performance of the algorithm is often\n  much better than this bound. },\n\tauthor = {Patrick Steele and Shane G. Henderson and David B. Shmoys},\n\tdate-added = {2016-03-17 17:07:15 +0000},\n\tdate-modified = {2018-09-07 02:21:35 +0000},\n\tjournal = {Naval Research Logistics},\n\tnumber = {3},\n\tpages = {187-202},\n\ttitle = {Aggregating courier deliveries},\n\turl_paper = {https://rdcu.be/4Cfy},\n\tvolume = {65},\n\tyear = {2018}}\n\n
\n
\n\n\n
\n We consider the problem of efficiently scheduling deliveries by an uncapacitated courier from a central location under online arrivals. We consider both adversary-controlled and Poisson arrival processes. In the adversarial setting we provide a randomized $(3 β Δ / 2 δ - 1)$-competitive algorithm, where $β$ is the approximation ratio of the Traveling Salesman Problem, $δ$ is the minimum distance between the central location and any customer, and $Δ$ is the length of the optimal Traveling Salesman tour over all customer locations and the central location. We provide instances showing that this analysis is tight. We also prove a $1 + 0.271 Δ / δ$ lower-bound on the competitive ratio of any algorithm in this setting. In the Poisson setting, we relax our assumption of deterministic travel times by assuming that travel times are distributed with a mean equal to the excursion length. We prove that optimal policies in this setting follow a threshold structure and describe this structure. For the half-line metric space we bound the performance of the randomized algorithm in the Poisson setting, and show through numerical experiments that the performance of the algorithm is often much better than this bound. \n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2017\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Teaching undergraduate simulation - 4 questions for 4 experienced instructors.\n \n \n \n\n\n \n Smith, J.; Alexopoulos, C.; Henderson, S. G.; and Schruben, L.\n\n\n \n\n\n\n In Chan, W. K. V.; D'Ambrogio, A.; Zacharewicz, G.; Mustafee, N.; Wainer, G.; and Page, E., editor(s), Proceedings of the 2017 Winter Simulation Conference, pages 4264–4275, Piscataway NJ, 2017. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{smietal17,\n\tabstract = {This paper and the corresponding panel session focus on teaching undergraduate simulation courses. The format brings together four experienced instructors to discuss four questions involving the structure and topic outlines of courses, print and software teaching materials used, and general teaching methods/philosophy. The hope is to provide some experience-based teaching information for new and soon-to-be instructors and to generate discussion with the simulation education community.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Jeffrey Smith and Christos Alexopoulos and Shane G. Henderson and Lee Schruben},\n\tbooktitle = {Proceedings of the 2017 Winter Simulation Conference},\n\tdate-added = {2018-05-04 15:27:17 +0000},\n\tdate-modified = {2018-05-04 15:32:47 +0000},\n\teditor = {W. K. V. Chan and A. D'Ambrogio and G. Zacharewicz and N. Mustafee and G. Wainer and E. Page},\n\torganization = {IEEE},\n\tpages = {4264--4275},\n\ttitle = {Teaching undergraduate simulation - 4 questions for 4 experienced instructors},\n\tyear = {2017}}\n\n
\n
\n\n\n
\n This paper and the corresponding panel session focus on teaching undergraduate simulation courses. The format brings together four experienced instructors to discuss four questions involving the structure and topic outlines of courses, print and software teaching materials used, and general teaching methods/philosophy. The hope is to provide some experience-based teaching information for new and soon-to-be instructors and to generate discussion with the simulation education community.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n (Even) smarter tools for (Citi)bike sharing.\n \n \n \n\n\n \n Freund, D.; Norouzi-Fard, A.; Paul, A.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n In Association for the Advancement of Artificial Intelligence Proceedings, pages Submitted, 2017. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{freetal2017,\n\tauthor = {Daniel Freund and Ashkan Norouzi-Fard and Alice Paul and Shane G. Henderson and David B. Shmoys},\n\tbooktitle = {Association for the Advancement of Artificial Intelligence Proceedings},\n\tdate-added = {2018-01-10 00:08:51 +0000},\n\tdate-modified = {2018-01-10 00:08:51 +0000},\n\tpages = {Submitted},\n\ttitle = {(Even) smarter tools for (Citi)bike sharing},\n\tyear = {2017}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Minimizing multimodular functions and allocating capacity in bike-sharing systems.\n \n \n \n \n\n\n \n Freund, D.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n In Eisenbrand, F.; and Koenemann, J., editor(s), Integer Programming and Combinatorial Optimization Proceedings, volume 10328, of Lecture Notes in Computer Science, pages 186–198, 2017. Springer\n \n\n\n\n
\n\n\n\n \n \n \"Minimizing arxiv\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{frehenshm17,\n\tauthor = {Daniel Freund and Shane G. Henderson and David B. Shmoys},\n\tbooktitle = {Integer Programming and Combinatorial Optimization Proceedings},\n\tdate-added = {2017-06-21 11:38:53 +0000},\n\tdate-modified = {2017-06-21 11:38:53 +0000},\n\teditor = {F. Eisenbrand and J. Koenemann},\n\tpages = {186--198},\n\tpublisher = {Springer},\n\tseries = {Lecture Notes in Computer Science},\n\ttitle = {Minimizing multimodular functions and allocating capacity in bike-sharing systems},\n\turl_arxiv = {https://arxiv.org/abs/1611.09304},\n\tvolume = {10328},\n\tyear = {2017}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Comparing the finite-time performance of simulation-optimization algorithms.\n \n \n \n \n\n\n \n Dong, N. A.; Eckman, D. J.; Poloczek, M.; Zhao, X.; and Henderson, S. G.\n\n\n \n\n\n\n In Chan, W. K. V.; D'Ambrogio, A.; Zacharewicz, G.; Mustafee, N.; Wainer, G.; and Page, E., editor(s), Proceedings of the 2017 Winter Simulation Conference, pages 2206–2217, Piscataway NJ, 2017. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Comparing arxiv\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{donetal17,\n\tabstract = {We empirically evaluate the finite-time performance of several simulation-optimization algorithms on a testbed of problems with the goal of motivating further development of algorithms with strong finite-time performance. We investigate if the observed performance of the algorithms can be explained by properties of the problems, e.g., the number of decision variables, the topology of the objective function, or the magnitude of the simulation error. \n},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Naijia Anna Dong and David J. Eckman and Matthias Poloczek and Xueqi Zhao and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2017 Winter Simulation Conference},\n\tdate-added = {2017-05-23 12:00:47 +0000},\n\tdate-modified = {2018-05-04 15:31:59 +0000},\n\teditor = {W. K. V. Chan and A. D'Ambrogio and G. Zacharewicz and N. Mustafee and G. Wainer and E. Page},\n\torganization = {IEEE},\n\tpages = {2206--2217},\n\ttitle = {Comparing the finite-time performance of simulation-optimization algorithms},\n\turl_arxiv = {http://arxiv.org/abs/1705.07825},\n\tyear = {2017}}\n\n
\n
\n\n\n
\n We empirically evaluate the finite-time performance of several simulation-optimization algorithms on a testbed of problems with the goal of motivating further development of algorithms with strong finite-time performance. We investigate if the observed performance of the algorithms can be explained by properties of the problems, e.g., the number of decision variables, the topology of the objective function, or the magnitude of the simulation error. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n History of seeking better solutions, AKA simulation optimization.\n \n \n \n\n\n \n Fu, M. C.; and Henderson, S. G.\n\n\n \n\n\n\n In Chan, W. K. V.; D'Ambrogio, A.; Zacharewicz, G.; Mustafee, N.; Wainer, G.; and Page, E., editor(s), Proceedings of the 2017 Winter Simulation Conference, pages 131–157, Piscataway NJ, 2017. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{fuhen17,\n\tabstract = {Simulation optimization - arguably the ultimate aim of most simulation users - has had a long and illustrious history closely tied with the 50 years of the Winter Simulation Conference (WSC). We touch upon the historical developments of the field, highlighting research progress and their interactions with WSC. Specific areas covered include ranking \\& selection methods, stochastic approximation and gradient estimation, response surface methodology, and sample average approximation. We discuss the interplay between research and practice, including software developments. We conclude with some reflections on the state of the field.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Michael C. Fu and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2017 Winter Simulation Conference},\n\tdate-added = {2017-05-23 12:00:45 +0000},\n\tdate-modified = {2018-05-04 15:31:11 +0000},\n\teditor = {W. K. V. Chan and A. D'Ambrogio and G. Zacharewicz and N. Mustafee and G. Wainer and E. Page},\n\torganization = {IEEE},\n\tpages = {131--157},\n\ttitle = {History of seeking better solutions, AKA simulation optimization},\n\tyear = {2017}}\n\n
\n
\n\n\n
\n Simulation optimization - arguably the ultimate aim of most simulation users - has had a long and illustrious history closely tied with the 50 years of the Winter Simulation Conference (WSC). We touch upon the historical developments of the field, highlighting research progress and their interactions with WSC. Specific areas covered include ranking & selection methods, stochastic approximation and gradient estimation, response surface methodology, and sample average approximation. We discuss the interplay between research and practice, including software developments. We conclude with some reflections on the state of the field.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n An efficient fully sequential selection procedure guaranteeing probably approximately correct selection.\n \n \n \n\n\n \n Ma, S.; and Henderson, S. G.\n\n\n \n\n\n\n In Chan, W. K. V.; D'Ambrogio, A.; Zacharewicz, G.; Mustafee, N.; Wainer, G.; and Page, E., editor(s), Proceedings of the 2017 Winter Simulation Conference, pages 2225–2236, Piscataway NJ, 2017. IEEE\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{mahen17,\n\tabstract = {Ranking and Selection (R\\&S) procedures are designed for selecting the best among a finite set of systems using stochastic simulation, guaranteeing the quality of the final selection. Instead of assuming a known lower bound on the difference between the best and others, we consider the probably approximately correct (PAC) selection formulation, which ensures a high quality solution with high probability for all configurations. In this paper, we present a new fully sequential selection procedure, called the Envelope Procedure (EP), which accommodates a variety of sampling rules. A particular sampling rule that achieves good efficiency is proposed. We compare the efficiency of the EP with some existing procedures in numerical experiments, and the results show that the EP saves considerable computational effort in many problem configurations.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Sijia Ma and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2017 Winter Simulation Conference},\n\tdate-added = {2017-05-23 11:59:04 +0000},\n\tdate-modified = {2018-05-04 15:30:21 +0000},\n\teditor = {W. K. V. Chan and A. D'Ambrogio and G. Zacharewicz and N. Mustafee and G. Wainer and E. Page},\n\torganization = {IEEE},\n\tpages = {2225--2236},\n\ttitle = {An efficient fully sequential selection procedure guaranteeing probably approximately correct selection},\n\tyear = {2017}}\n\n
\n
\n\n\n
\n Ranking and Selection (R&S) procedures are designed for selecting the best among a finite set of systems using stochastic simulation, guaranteeing the quality of the final selection. Instead of assuming a known lower bound on the difference between the best and others, we consider the probably approximately correct (PAC) selection formulation, which ensures a high quality solution with high probability for all configurations. In this paper, we present a new fully sequential selection procedure, called the Envelope Procedure (EP), which accommodates a variety of sampling rules. A particular sampling rule that achieves good efficiency is proposed. We compare the efficiency of the EP with some existing procedures in numerical experiments, and the results show that the EP saves considerable computational effort in many problem configurations.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bayes-optimal entropy pursuit for active choice-based preference learning.\n \n \n \n \n\n\n \n Pallone, S. N.; Frazier, P. I.; and Henderson, S. G.\n\n\n \n\n\n\n Submitted. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Bayes-optimal arxiv\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{palfrahen17,\n\tauthor = {S. N. Pallone and P. I. Frazier and S. G. Henderson},\n\tdate-added = {2017-02-24 16:59:13 +0000},\n\tdate-modified = {2017-02-24 17:01:00 +0000},\n\tjournal = {Submitted},\n\ttitle = {Bayes-optimal entropy pursuit for active choice-based preference learning},\n\turl_arxiv = {https://arxiv.org/abs/1702.07694},\n\tyear = {2017}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient ranking and selection in high performance computing environments.\n \n \n \n \n\n\n \n Ni, E. C.; Ciocan, D. F.; Henderson, S. G.; and Hunter, S. R.\n\n\n \n\n\n\n Operations Research, 65(3): 821–836. 2017.\n \n\n\n\n
\n\n\n\n \n \n \"Efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{nietal17,\n\tabstract = {The goal of ranking and selection (R&S) procedures is to identify the best stochastic system from among a finite set of competing alternatives. Such procedures require constructing estimates of each system's performance, which can be obtained simultaneously by running multiple independent replications on a parallel computing platform. Nontrivial statistical and implementation issues arise when designing R&S procedures for a parallel computing environment. We propose several design principles for parallel R&S procedures that preserve statistical validity and maximize core utilization, especially when large numbers of alternatives or cores are involved. These principles are followed closely by our parallel Good Selection Procedure (GSP), which, under the assumption of normally distributed output, (i) guarantees to select a system in the indifference zone with high probability, (ii) in tests on up to 1,024 parallel cores runs efficiently, and (iii) in an example uses smaller sample sizes compared to existing parallel procedures, particularly for large problems (over 106 alternatives). In our computational study we discuss 3 methods for implementing GSP on parallel computers, namely the Message-Passing Interface (MPI), Hadoop MapReduce, and Spark, and show that Spark provides a good compromise between the efficiency of MPI and robustness to core failures.},\n\tauthor = {Eric C. Ni and Dragos F.\\ Ciocan and Shane G.\\ Henderson and Susan R.\\ Hunter},\n\tdate-added = {2016-09-30 16:55:01 +0000},\n\tdate-modified = {2017-05-17 11:28:11 +0000},\n\tjournal = {Operations Research},\n\tnumber = {3},\n\tpages = {821--836},\n\ttitle = {Efficient ranking and selection in high performance computing environments},\n\turl_paper = {pubs/ParallelRS.pdf},\n\tvolume = {65},\n\tyear = {2017}}\n\n
\n
\n\n\n
\n The goal of ranking and selection (R&S) procedures is to identify the best stochastic system from among a finite set of competing alternatives. Such procedures require constructing estimates of each system's performance, which can be obtained simultaneously by running multiple independent replications on a parallel computing platform. Nontrivial statistical and implementation issues arise when designing R&S procedures for a parallel computing environment. We propose several design principles for parallel R&S procedures that preserve statistical validity and maximize core utilization, especially when large numbers of alternatives or cores are involved. These principles are followed closely by our parallel Good Selection Procedure (GSP), which, under the assumption of normally distributed output, (i) guarantees to select a system in the indifference zone with high probability, (ii) in tests on up to 1,024 parallel cores runs efficiently, and (iii) in an example uses smaller sample sizes compared to existing parallel procedures, particularly for large problems (over 10⁶ alternatives). In our computational study we discuss 3 methods for implementing GSP on parallel computers, namely the Message-Passing Interface (MPI), Hadoop MapReduce, and Spark, and show that Spark provides a good compromise between the efficiency of MPI and robustness to core failures.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2016\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Large-network travel time distribution estimation for ambulances.\n \n \n \n\n\n \n Westgate, B. S; Woodard, D. B; Matteson, D. S; and Henderson, S. G\n\n\n \n\n\n\n European Journal of Operational Research, 252(1): 322–333. 2016.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{wesetal14,\n\tauthor = {Westgate, Bradford S and Woodard, Dawn B and Matteson, David S and Henderson, Shane G},\n\tdate-added = {2023-03-20 16:16:24 -0400},\n\tdate-modified = {2023-03-20 16:16:24 -0400},\n\tjournal = {European Journal of Operational Research},\n\tnumber = {1},\n\tpages = {322--333},\n\tpublisher = {Elsevier},\n\ttitle = {Large-network travel time distribution estimation for ambulances},\n\tvolume = {252},\n\tyear = {2016}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Simulation optimization for a large-scale bike-sharing system.\n \n \n \n \n\n\n \n Jian, N.; Freund, D.; Wiberg, H.; and Henderson, S. G.\n\n\n \n\n\n\n In Roeder, T. M. K.; Frazier, P. I.; Szechtman, R.; and Zhou, E., editor(s), Proceedings of the 2016 Winter Simulation Conference, pages 602–613, Piscataway NJ, 2016. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Simulation paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{jiaetal16,\n\tabstract = {The Citi Bike system in New York City has approximately 466 stations, 6074 bikes, and 15777 docks. We wish to optimize both bike and dock allocations for each station at the beginning of the day, so that the expected number of customers who cannot find a bike, or cannot find a dock to return a bike, is minimized. With a system of this scale, traditional simulation optimization methods such as stochastic gradient-search and random search are inefficient. We propose a variety of more efficient gradient-like heuristic methods that can improve any given allocation based on a discrete-event simulation model of the system. The methods are tested on data from December 2015 with different starting solutions obtained from other models. We further explore the relationship between the system behaviors during the morning and afternoon rush hours by comparing optimal solutions when the problem is restricted to these two periods.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Nanjing Jian and Daniel Freund and Holly Wiberg and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2016 Winter Simulation Conference},\n\tdate-added = {2017-05-23 11:56:31 +0000},\n\tdate-modified = {2017-05-23 11:56:31 +0000},\n\teditor = {T. M. K. Roeder and P. I. Frazier and R. Szechtman and E. Zhou},\n\torganization = {IEEE},\n\tpages = {602--613},\n\ttitle = {Simulation optimization for a large-scale bike-sharing system},\n\turl_paper = {http://www.informs-sim.org/wsc16papers/054.pdf},\n\tyear = {2016}}\n\n
\n
\n\n\n
\n The Citi Bike system in New York City has approximately 466 stations, 6074 bikes, and 15777 docks. We wish to optimize both bike and dock allocations for each station at the beginning of the day, so that the expected number of customers who cannot find a bike, or cannot find a dock to return a bike, is minimized. With a system of this scale, traditional simulation optimization methods such as stochastic gradient-search and random search are inefficient. We propose a variety of more efficient gradient-like heuristic methods that can improve any given allocation based on a discrete-event simulation model of the system. The methods are tested on data from December 2015 with different starting solutions obtained from other models. We further explore the relationship between the system behaviors during the morning and afternoon rush hours by comparing optimal solutions when the problem is restricted to these two periods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Enforcing separation of visits in an air-ambulance application.\n \n \n \n\n\n \n Guo, J.; Henderson, S. G.; and Steele, P. R.\n\n\n \n\n\n\n 2016.\n Submitted\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{guohenste16,\n\tabstract = {Ornge is an air-ambulance provider operating in Ontario, Canada. Non-urgent calls for transport are scheduled one day in advance using an existing set-partitioning formulation. One location requires that patient pickups and drop-offs be spaced in time. We show how to extend the formulation to allow this.},\n\tauthor = {J. Guo and S. G. Henderson and P. R. Steele},\n\tdate-added = {2016-10-04 16:51:41 +0000},\n\tdate-modified = {2016-10-06 15:03:24 +0000},\n\tnote = {Submitted},\n\ttitle = {Enforcing separation of visits in an air-ambulance application},\n\tyear = {2016}}\n\n
\n
\n\n\n
\n Ornge is an air-ambulance provider operating in Ontario, Canada. Non-urgent calls for transport are scheduled one day in advance using an existing set-partitioning formulation. One location requires that patient pickups and drop-offs be spaced in time. We show how to extend the formulation to allow this.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimating the density of a conditional expectation.\n \n \n \n \n\n\n \n Steckley, S. G.; Henderson, S. G.; Ruppert, D.; Yang, R.; Apley, D. W.; and Staum, J.\n\n\n \n\n\n\n Electronic Journal of Statistics, 10: 736–760. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Estimating paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{steetal16,\n\tabstract = {In this paper, we analyze methods for estimating the density of a conditional expectation. We compare an estimator based on a straightforward application of kernel density estimation to a bias-corrected estimator that we propose. We prove convergence results for these estimators and show that the bias-corrected estimator has a superior rate of convergence. In a simulated test case, we show that the bias-corrected estimator performs better in a practical example with a realistic sample size.},\n\tauthor = {S. G. Steckley and S. G. Henderson and D. Ruppert and R. Yang and D. W. Apley and J. Staum},\n\tdate-added = {2016-01-10 17:42:37 +0000},\n\tdate-modified = {2017-06-10 15:53:38 +0000},\n\tjournal = {Electronic Journal of Statistics},\n\tpages = {736--760},\n\ttitle = {Estimating the density of a conditional expectation},\n\turl_paper = {pubs/Steckleyetal2016.pdf},\n\tvolume = {10},\n\tyear = {2016}}\n\n
\n
\n\n\n
\n In this paper, we analyze methods for estimating the density of a conditional expectation. We compare an estimator based on a straightforward application of kernel density estimation to a bias-corrected estimator that we propose. We prove convergence results for these estimators and show that the bias-corrected estimator has a superior rate of convergence. In a simulated test case, we show that the bias-corrected estimator performs better in a practical example with a realistic sample size.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n (Citi)Bike sharing.\n \n \n \n\n\n \n Henderson, S. G.; O'Mahony, E.; and Shmoys, D. B.\n\n\n \n\n\n\n 2016.\n Submitted\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{henomashm16,\n\tabstract = {We report on an extended and ongoing engagement with NYC Bike Share LLC, the operators of the bike-sharing system Citibike in New York City (NYC). Perhaps the central problem in operating a bike-sharing system is that of maintaining system balance, so that to the extent possible users can find bikes when they want them, and find open docks when they want to return a bike. Rebalancing problems can be naturally classified into the overnight rebalancing problem when trucks can move relatively easily through a city, and the mid-rush rebalancing problem when, at least in NYC, traffic congestion prevents the use of trucks, and instead bike trailers with very limited capacity can be towed by bikers in bike lanes. We present a method to determine the allocation of both bikes and docks across a city, proving structural properties that admit an efficient solution at the scale of NYC operations. This method is used at a strategic level to plan the deployment of bikes and racks across the city. We then present operational-planning formulations that attempt, as closely as possible, to match a desired deployment plan within realistic resource constraints. These methods are in use in NYC to help Citibike with their daily challenges, and to assist with expansion planning as the footprint of stations grows.},\n\tauthor = {Shane G. Henderson and Eoin O'Mahony and David B. Shmoys},\n\tdate-added = {2016-01-10 17:42:15 +0000},\n\tdate-modified = {2016-01-10 17:42:15 +0000},\n\tnote = {Submitted},\n\ttitle = {(Citi)Bike sharing},\n\tyear = {2016}}\n\n
\n
\n\n\n
\n We report on an extended and ongoing engagement with NYC Bike Share LLC, the operators of the bike-sharing system Citibike in New York City (NYC). Perhaps the central problem in operating a bike-sharing system is that of maintaining system balance, so that to the extent possible users can find bikes when they want them, and find open docks when they want to return a bike. Rebalancing problems can be naturally classified into the overnight rebalancing problem when trucks can move relatively easily through a city, and the mid-rush rebalancing problem when, at least in NYC, traffic congestion prevents the use of trucks, and instead bike trailers with very limited capacity can be towed by bikers in bike lanes. We present a method to determine the allocation of both bikes and docks across a city, proving structural properties that admit an efficient solution at the scale of NYC operations. This method is used at a strategic level to plan the deployment of bikes and racks across the city. We then present operational-planning formulations that attempt, as closely as possible, to match a desired deployment plan within realistic resource constraints. These methods are in use in NYC to help Citibike with their daily challenges, and to assist with expansion planning as the footprint of stations grows.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A bound on the performance of optimal ambulance redeployment policies in loss systems.\n \n \n \n\n\n \n Chong, K. C.; Henderson, S. G.; Lewis, M. E.; and Topaloglu, H.\n\n\n \n\n\n\n 2016.\n Submitted\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{choetal16,\n\tabstract = {Ambulance redeployment refers to the practice of strategically relocating ambulances in real time to better respond to future call arrivals. Optimal redeployment policies can be found, in theory, via stochastic dynamic programming, but this approach succumbs to the curse of dimensionality. We instead consider the problem of finding an upper bound on the performance of optimal redeployment policies, which can be used, for instance, as a benchmark against which heuristic policies can be compared. Such a bound has been developed for systems in which calls can queue when no ambulances are available. We adapt this bound to loss systems, corresponding to the fairly common situation in which arriving calls are redirected to an alternative service if they cannot be immediately served. This adaptation is nontrivial, and involves the introduction of new ideas, including an integer program whose feasible region contains the set of decisions made by any member of a fairly general class of redeployment policies. We prove the validity of our upper bound, and study its performance through computational experiments.},\n\tauthor = {K. C. Chong and S. G. Henderson and M. E. Lewis and H. Topaloglu},\n\tdate-added = {2016-01-10 17:38:59 +0000},\n\tdate-modified = {2016-01-10 17:40:25 +0000},\n\tnote = {Submitted},\n\ttitle = {A bound on the performance of optimal ambulance redeployment policies in loss systems},\n\tyear = {2016}}\n\n
\n
\n\n\n
\n Ambulance redeployment refers to the practice of strategically relocating ambulances in real time to better respond to future call arrivals. Optimal redeployment policies can be found, in theory, via stochastic dynamic programming, but this approach succumbs to the curse of dimensionality. We instead consider the problem of finding an upper bound on the performance of optimal redeployment policies, which can be used, for instance, as a benchmark against which heuristic policies can be compared. Such a bound has been developed for systems in which calls can queue when no ambulances are available. We adapt this bound to loss systems, corresponding to the fairly common situation in which arriving calls are redirected to an alternative service if they cannot be immediately served. This adaptation is nontrivial, and involves the introduction of new ideas, including an integer program whose feasible region contains the set of decisions made by any member of a fairly general class of redeployment policies. We prove the validity of our upper bound, and study its performance through computational experiments.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The vehicle mix decision in emergency medical service systems.\n \n \n \n \n\n\n \n Chong, K. C.; Henderson, S. G.; and Lewis, M. E.\n\n\n \n\n\n\n Manufacturing & Service Operations Management, 18(3): 347–360. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{chohenlew14,\n\tabstract = {We consider the problem of selecting the number of Advanced Life Support (ALS) and Basic Life Support (BLS) ambulances - the vehicle mix - to deploy in an Emergency Medical Service (EMS) system, given a budget constraint. ALS ambulances can treat a wider range of emergencies, while BLS ambulances are less expensive to operate. To this end, we develop a framework under which the performance of a system operating under a given vehicle mix can be evaluated. Because the choice of vehicle mix affects how ambulances are dispatched to incoming calls, as well as how they are deployed to base locations, we adopt an optimization-based approach. We construct two models - one a Markov decision process, the other an integer program - to study the problems of dispatching and deployment in a tiered system, respectively. In each case, the objective function value attained by an optimal decision serves as our performance measure. Numerical experiments performed with both models suggest that, under reasonable choices of inputs, a wide range of tiered systems perform comparably to all-ALS fleets.},\n\tauthor = {Kenneth C. Chong and Shane G. Henderson and Mark E. Lewis},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2017-01-06 16:48:13 +0000},\n\tjournal = {Manufacturing \\& Service Operations Management},\n\tnumber = {3},\n\tpages = {347--360},\n\ttitle = {The vehicle mix decision in emergency medical service systems},\n\turl_paper = {pubs/vehiclemix.pdf},\n\tvolume = {18},\n\tyear = {2016}}\n\n
\n
\n\n\n
\n We consider the problem of selecting the number of Advanced Life Support (ALS) and Basic Life Support (BLS) ambulances - the vehicle mix - to deploy in an Emergency Medical Service (EMS) system, given a budget constraint. ALS ambulances can treat a wider range of emergencies, while BLS ambulances are less expensive to operate. To this end, we develop a framework under which the performance of a system operating under a given vehicle mix can be evaluated. Because the choice of vehicle mix affects how ambulances are dispatched to incoming calls, as well as how they are deployed to base locations, we adopt an optimization-based approach. We construct two models - one a Markov decision process, the other an integer program - to study the problems of dispatching and deployment in a tiered system, respectively. In each case, the objective function value attained by an optimal decision serves as our performance measure. Numerical experiments performed with both models suggest that, under reasonable choices of inputs, a wide range of tiered systems perform comparably to all-ALS fleets.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Coupled bisection for root ordering.\n \n \n \n \n\n\n \n Pallone, S.; Frazier, P. I.; and Henderson, S. G.\n\n\n \n\n\n\n Operations Research Letters, 44(2): 165–169. 2016.\n \n\n\n\n
\n\n\n\n \n \n \"Coupled paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{palfrahen16,\n\tabstract = {We consider the problem of solving multiple ``coupled'' root-finding problems at once, in that we can evaluate every function at the same point simultaneously. Using a dynamic programming formulation, we show that a sequential bisection algorithm is a close-to-optimal method for finding a ranking with respect to the zeros of these functions. We show the ranking can be found in linear time, prove an asymptotic approximation guarantee of 1.44, and conjecture that this policy is near-optimal.},\n\tauthor = {Stephen Pallone and Peter I. Frazier and Shane G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-02-01 18:00:07 +0000},\n\tjournal = {Operations Research Letters},\n\tnumber = {2},\n\tpages = {165--169},\n\ttitle = {Coupled bisection for root ordering},\n\turl_paper = {pubs/CoupledBisection.pdf},\n\tvolume = {44},\n\tyear = {2016}}\n\n
\n
\n\n\n
\n We consider the problem of solving multiple ``coupled'' root-finding problems at once, in that we can evaluate every function at the same point simultaneously. Using a dynamic programming formulation, we show that a sequential bisection algorithm is a close-to-optimal method for finding a ranking with respect to the zeros of these functions. We show the ranking can be found in linear time, prove an asymptotic approximation guarantee of 1.44, and conjecture that this policy is near-optimal.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2015\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n An introduction to simulation optimization.\n \n \n \n \n\n\n \n Jian, N.; and Henderson, S. G.\n\n\n \n\n\n\n In Yilmaz, L.; Chan, W. K. V.; Roeder, T. M. K.; Macal, C.; and Rosetti, M., editor(s), Proceedings of the 2015 Winter Simulation Conference, pages 1780–1794, Piscataway NJ, 2015. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"An paper\n  \n \n \n \"An slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 10 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{jiahen15,\n\tabstract = {In this tutorial we give an introduction to simulation optimization, covering its general form, central issues and common problems, basic methods, and a case study. Our target audience is users with experience in using simulation, but not necessarily experience with optimization. We offer guiding principles, and point to surveys and other tutorials that provide further information.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Nanjing Jian and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2015 Winter Simulation Conference},\n\tdate-added = {2016-01-10 19:21:56 +0000},\n\tdate-modified = {2016-01-10 19:21:56 +0000},\n\teditor = {L. Yilmaz and W. K. V. Chan and T. M. K. Roeder and C. Macal and M.D. Rosetti},\n\torganization = {IEEE},\n\tpages = {1780--1794},\n\ttitle = {An introduction to simulation optimization},\n\turl_paper = {pubs/WSC2015Tut.pdf},\n\turl_slides = {pubs/WSC2015TutSlides.pdf},\n\tyear = {2015}}\n\n
\n
\n\n\n
\n In this tutorial we give an introduction to simulation optimization, covering its general form, central issues and common problems, basic methods, and a case study. Our target audience is users with experience in using simulation, but not necessarily experience with optimization. We offer guiding principles, and point to surveys and other tutorials that provide further information.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Predicting bike usage for New York City's bike sharing system.\n \n \n \n \n\n\n \n Singhvi, D.; Singhvi, S.; Frazier, P. I.; Henderson, S. G.; O'Mahony, E.; Shmoys, D. B.; and Woodard, D. B.\n\n\n \n\n\n\n In Association for the Advancement of Artificial Intelligence Proceedings, 2015. \n \n\n\n\n
\n\n\n\n \n \n \"Predicting paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{sinetal15,\n\tabstract = {Bike sharing systems consist of a fleet of bikes placed in a network of docking stations. These bikes can then be rented and returned to any of the docking stations after usage. Predicting unrealized bike demand at locations currently without bike stations is important for effectively designing and expanding bike sharing systems. We predict pairwise bike demand for New York City's Citi Bike system. Since the system is driven by daily commuters we focus only on the morning rush hours between 7:00 AM to 11:00 AM during weekdays. We use taxi usage, weather and spatial variables as covariates to predict bike demand, and further analyze the influence of precipitation and day of week. We show that aggregating stations in neighborhoods can substantially improve predictions. The presented model can assist planners by predicting bike demand at a macroscopic level, between pairs of neighborhoods.},\n\tauthor = {Divya Singhvi and Somya Singhvi and Peter I. Frazier and Shane G. Henderson and Eoin O'Mahony and David B. Shmoys and Dawn B. Woodard},\n\tbooktitle = {Association for the Advancement of Artificial Intelligence Proceedings},\n\tdate-added = {2016-01-10 19:19:05 +0000},\n\tdate-modified = {2016-01-10 19:20:09 +0000},\n\ttitle = {Predicting bike usage for New York City's bike sharing system},\n\turl_paper = {pubs/AAAI15.pdf},\n\tyear = {2015}}\n\n
\n
\n\n\n
\n Bike sharing systems consist of a fleet of bikes placed in a network of docking stations. These bikes can then be rented and returned to any of the docking stations after usage. Predicting unrealized bike demand at locations currently without bike stations is important for effectively designing and expanding bike sharing systems. We predict pairwise bike demand for New York City's Citi Bike system. Since the system is driven by daily commuters we focus only on the morning rush hours between 7:00 AM to 11:00 AM during weekdays. We use taxi usage, weather and spatial variables as covariates to predict bike demand, and further analyze the influence of precipitation and day of week. We show that aggregating stations in neighborhoods can substantially improve predictions. The presented model can assist planners by predicting bike demand at a macroscopic level, between pairs of neighborhoods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Comparing message passing interface and mapreduce for large-scale parallel ranking and selection.\n \n \n \n \n\n\n \n Ni, E. C.; Ciocan, D. F.; Henderson, S. G.; and Hunter, S. R.\n\n\n \n\n\n\n In Yilmaz, L.; Chan, W. K. V.; Roeder, T. M. K.; Macal, C.; and Rosetti, M., editor(s), Proceedings of the 2015 Winter Simulation Conference, pages 3858–3867, Piscataway NJ, 2015. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Comparing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{nietal15,\n\tabstract = {We compare two methods for implementing ranking and selection algorithms in large-scale parallel computing environments. The Message Passing Interface (MPI) provides the programmer with complete control over sending and receiving messages between cores, and is fragile with regard to core failures or messages going awry. In contrast, MapReduce handles all communication and is quite robust, but is more rigid in terms of how algorithms can be coded. As expected in a high-performance computing context, we find that MPI is the more efficient of the two environments, although MapReduce is a reasonable choice. Accordingly, MapReduce may be attractive in environments where cores can stall or fail, such as is possible in low-budget cloud computing.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Eric C. Ni and Dragos F. Ciocan and Shane G. Henderson and Susan R. Hunter},\n\tbooktitle = {Proceedings of the 2015 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 19:22:59 +0000},\n\teditor = {L. Yilmaz and W. K. V. Chan and T. M. K. Roeder and C. Macal and M.D. Rosetti},\n\torganization = {IEEE},\n\tpages = {3858--3867},\n\ttitle = {Comparing message passing interface and mapreduce for large-scale parallel ranking and selection},\n\turl_paper = {pubs/WSC2015Ni.pdf},\n\tyear = {2015}}\n\n
\n
\n\n\n
\n We compare two methods for implementing ranking and selection algorithms in large-scale parallel computing environments. The Message Passing Interface (MPI) provides the programmer with complete control over sending and receiving messages between cores, and is fragile with regard to core failures or messages going awry. In contrast, MapReduce handles all communication and is quite robust, but is more rigid in terms of how algorithms can be coded. As expected in a high-performance computing context, we find that MPI is the more efficient of the two environments, although MapReduce is a reasonable choice. Accordingly, MapReduce may be attractive in environments where cores can stall or fail, such as is possible in low-budget cloud computing.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Guide to Sample Average Approximation.\n \n \n \n \n\n\n \n Kim, S.; Pasupathy, R.; and Henderson, S. G.\n\n\n \n\n\n\n In Fu, M. C., editor(s), Handbook of Simulation Optimization, volume 216, of International Series in Operations Research & Management Science, 8. Springer New York, 2015.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 4 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{kimpashen12,\n\tabstract = {We provide a review of the principle of sample-average approximation (SAA) for solving simulation-optimization problems. Our goal is to provide an accessible overview of the area and emphasize interesting recent work. We explain when one might want to use SAA and when one might expect it to provide good-quality solutions. We also review some of the key theoretical properties of the solutions obtained through SAA. We contrast SAA with stochastic approximation (SA) methods in terms of the computational effort required to obtain solutions of a given quality, explaining why SA ``wins'' asymptotically. However, an extension of SAA known as retrospective optimization can match the asymptotic convergence rate of SA, at least up to a multiplicative constant.},\n\tauthor = {Sujin Kim and Raghu Pasupathy and Shane G. Henderson},\n\tbooktitle = {Handbook of Simulation Optimization},\n\tchapter = {8},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {Michael C. Fu},\n\tpublisher = {Springer New York},\n\tseries = {International Series in Operations Research \\& Management Science},\n\ttitle = {A Guide to Sample Average Approximation},\n\turl_paper = {pubs/SAAGuide.pdf},\n\tvolume = {216},\n\tyear = {2015}}\n\n
\n
\n\n\n
\n We provide a review of the principle of sample-average approximation (SAA) for solving simulation-optimization problems. Our goal is to provide an accessible overview of the area and emphasize interesting recent work. We explain when one might want to use SAA and when one might expect it to provide good-quality solutions. We also review some of the key theoretical properties of the solutions obtained through SAA. We contrast SAA with stochastic approximation (SA) methods in terms of the computational effort required to obtain solutions of a given quality, explaining why SA ``wins'' asymptotically. However, an extension of SAA known as retrospective optimization can match the asymptotic convergence rate of SA, at least up to a multiplicative constant.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n How hard are steady-state queueing simulations?.\n \n \n \n \n\n\n \n Ni, E. C.; and Henderson, S. G.\n\n\n \n\n\n\n ACM Transactions on Modeling and Computer Simulation, 25(4): Article 27. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"How paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{nihen14,\n\tabstract = {Some queueing systems require tremendously long simulation runlengths to obtain accurate estimators of certain steady-state performance measures when the servers are heavily utilized. However, this is not uniformly the case. We analyze a number of single-station Markovian queueing models, demonstrating that several steady-state performance measures can be accurately estimated with modest runlengths. Our analysis reinforces the meta result that if the queue is ``well dimensioned,'' then simulation runlengths will be modest. Queueing systems can be well dimensioned because customers abandon if they are forced to wait in line too long, or because the queue is operated in the ``quality and efficiency driven regime'' where servers are heavily utilized but wait times are short. The results are based on computing or bounding the asymptotic variance and bias for several standard single-station queueing models and performance measures.},\n\tannote = {pubs/howhard.pdf},\n\tauthor = {Eric Cao Ni and Shane G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2018-07-16 13:39:10 +0000},\n\tjournal = {ACM Transactions on Modeling and Computer Simulation},\n\tnumber = {4},\n\tpages = {Article 27},\n\ttitle = {How hard are steady-state queueing simulations?},\n\turl_paper = {https://dl.acm.org/authorize?N654062},\n\tvolume = {25},\n\tyear = {2015}}\n\n
\n
\n\n\n
\n Some queueing systems require tremendously long simulation runlengths to obtain accurate estimators of certain steady-state performance measures when the servers are heavily utilized. However, this is not uniformly the case. We analyze a number of single-station Markovian queueing models, demonstrating that several steady-state performance measures can be accurately estimated with modest runlengths. Our analysis reinforces the meta result that if the queue is ``well dimensioned,'' then simulation runlengths will be modest. Queueing systems can be well dimensioned because customers abandon if they are forced to wait in line too long, or because the queue is operated in the ``quality and efficiency driven regime'' where servers are heavily utilized but wait times are short. The results are based on computing or bounding the asymptotic variance and bias for several standard single-station queueing models and performance measures.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A spatio-temporal point process model for ambulance demand.\n \n \n \n \n\n\n \n Zhou, Z.; Matteson, D. S.; Woodard, D. B.; Henderson, S. G.; and Micheas, A. C.\n\n\n \n\n\n\n Journal of the American Statistical Association – Applications and Case Studies, 110(509): 6–15. 2015.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{zhoetal14,\n\tabstract = {Ambulance demand estimation at fine time and location scales is critical for fleet management and dynamic deployment. We are motivated by the problem of estimating the spatial distribution of ambulance demand in Toronto, Canada, as it changes over discrete 2-hour intervals. This large-scale dataset is sparse at the desired temporal resolutions and exhibits location-specific serial dependence, daily and weekly seasonality. We address these challenges by introducing a novel characterization of time-varying Gaussian mixture models. We fix the mixture component distributions across all time periods to overcome data sparsity and accurately describe Toronto's spatial structure, while representing the complex spatio-temporal dynamics through time-varying mixture weights. We constrain the mixture weights to capture weekly seasonality, and apply a conditionally autoregressive prior on the mixture weights of each component to represent location-specific short-term serial dependence and daily seasonality. While estimation may be performed using a fixed number of mixture components, we also extend to estimate the number of components using birth-and-death Markov chain Monte Carlo. The proposed model is shown to give higher statistical predictive accuracy and to reduce the error in predicting EMS operational performance by as much as two-thirds compared to a typical industry practice.},\n\tauthor = {Z. Zhou and D. S. Matteson and D. B. Woodard and S. G. Henderson and A. C. Micheas},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Journal of the {A}merican {S}tatistical {A}ssociation -- Applications and Case Studies},\n\tnumber = {509},\n\tpages = {6--15},\n\ttitle = {A spatio-temporal point process model for ambulance demand},\n\turl_paper = {pubs/spatiotemp.pdf},\n\tvolume = {110},\n\tyear = {2015}}\n
\n
\n\n\n
\n Ambulance demand estimation at fine time and location scales is critical for fleet management and dynamic deployment. We are motivated by the problem of estimating the spatial distribution of ambulance demand in Toronto, Canada, as it changes over discrete 2-hour intervals. This large-scale dataset is sparse at the desired temporal resolutions and exhibits location-specific serial dependence, daily and weekly seasonality. We address these challenges by introducing a novel characterization of time-varying Gaussian mixture models. We fix the mixture component distributions across all time periods to overcome data sparsity and accurately describe Toronto's spatial structure, while representing the complex spatio-temporal dynamics through time-varying mixture weights. We constrain the mixture weights to capture weekly seasonality, and apply a conditionally autoregressive prior on the mixture weights of each component to represent location-specific short-term serial dependence and daily seasonality. While estimation may be performed using a fixed number of mixture components, we also extend to estimate the number of components using birth-and-death Markov chain Monte Carlo. The proposed model is shown to give higher statistical predictive accuracy and to reduce the error in predicting EMS operational performance by as much as two-thirds compared to a typical industry practice.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2014\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n A comparison of two parallel ranking and selection procedures.\n \n \n \n \n\n\n \n Ni, E. C.; Hunter, S. R.; and Henderson, S. G.\n\n\n \n\n\n\n In Tolk, A.; Diallo, D.; Ryzhov, I. O.; Yilmaz, L.; Buckley, S.; and Miller, J. A., editor(s), Proceedings of the 2014 Winter Simulation Conference, pages 3761–3772, Piscataway NJ, 2014. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{nietal14,\n\tabstract = {Traditional solutions to ranking and selection problems include two-stage procedures (e.g., the NSGS procedure of Nelson et al. 2001) and fully-sequential screening procedures (e.g., Kim and Nelson 2001 and Hong 2006). In a parallel computing environment, a naively-parallelized NSGS procedure may require more simulation replications than a sequential screening procedure such as that of Ni, Hunter, and Henderson (2013) (NHH), but requires less communication since there is no periodic screening. The parallel procedure NHH may require less simulation replications overall, but requires more communication to implement periodic screening. We numerically explore the trade-offs between these two procedures on a parallel computing platform. In particular, we discuss their statistical validity, efficiency, and implementation, including communication and load-balancing. Inspired by the comparison results, we propose a framework for hybrid procedures that may further reduce simulation cost or guarantee to select a good system when multiple systems are clustered near the best.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Eric Cao Ni and Susan R. Hunter and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2014 Winter Simulation Conference},\n\tdate-added = {2016-01-10 18:06:38 +0000},\n\tdate-modified = {2016-01-10 18:06:38 +0000},\n\teditor = {A. Tolk and D. Diallo and I. O. Ryzhov and L. Yilmaz and S. Buckley and J. A. Miller},\n\torganization = {IEEE},\n\tpages = {3761--3772},\n\ttitle = {A comparison of two parallel ranking and selection procedures},\n\turl_paper = {pubs/wsc14parallel.pdf},\n\tyear = {2014}}\n\n
\n
\n\n\n
\n Traditional solutions to ranking and selection problems include two-stage procedures (e.g., the NSGS procedure of Nelson et al. 2001) and fully-sequential screening procedures (e.g., Kim and Nelson 2001 and Hong 2006). In a parallel computing environment, a naively-parallelized NSGS procedure may require more simulation replications than a sequential screening procedure such as that of Ni, Hunter, and Henderson (2013) (NHH), but requires less communication since there is no periodic screening. The parallel procedure NHH may require less simulation replications overall, but requires more communication to implement periodic screening. We numerically explore the trade-offs between these two procedures on a parallel computing platform. In particular, we discuss their statistical validity, efficiency, and implementation, including communication and load-balancing. Inspired by the comparison results, we propose a framework for hybrid procedures that may further reduce simulation cost or guarantee to select a good system when multiple systems are clustered near the best.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Multisection: Parallelized bisection.\n \n \n \n \n\n\n \n Pallone, S.; Frazier, P. I.; and Henderson, S. G.\n\n\n \n\n\n\n In Tolk, A.; Diallo, D.; Ryzhov, I. O.; Yilmaz, L.; Buckley, S.; and Miller, J. A., editor(s), Proceedings of the 2014 Winter Simulation Conference, pages 3773–3784, Piscataway NJ, 2014. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Multisection: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{palfrahen14,\n\tabstract = {We consider a one-dimensional bisection method for finding the zero of a function, where function evaluations can be performed asynchronously in a parallel computing environment. Using dynamic programming, we characterize the Bayes-optimal policy for sequentially choosing points at which to query the function. In choosing these points, we face a trade-off between aggressively reducing the search space in the short term, and maintaining a desirable spread of queries in the long-term. Our results provide insight on how this trade-off is affected by function evaluation times, risk preferences, and computational budget.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Stephen Pallone and Peter I. Frazier and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2014 Winter Simulation Conference},\n\tdate-added = {2016-01-10 17:15:46 +0000},\n\tdate-modified = {2018-01-27 21:13:55 +0000},\n\teditor = {A. Tolk and D. Diallo and I. O. Ryzhov and L. Yilmaz and S. Buckley and J. A. Miller},\n\torganization = {IEEE},\n\tpages = {3773--3784},\n\ttitle = {Multisection: Parallelized bisection},\n\turl_paper = {pubs/wsc14multisection.pdf},\n\tyear = {2014}}\n\n
\n
\n\n\n
\n We consider a one-dimensional bisection method for finding the zero of a function, where function evaluations can be performed asynchronously in a parallel computing environment. Using dynamic programming, we characterize the Bayes-optimal policy for sequentially choosing points at which to query the function. In choosing these points, we face a trade-off between aggressively reducing the search space in the short term, and maintaining a desirable spread of queries in the long-term. Our results provide insight on how this trade-off is affected by function evaluation times, risk preferences, and computational budget.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sequential detection of convexity from noisy function evaluations.\n \n \n \n \n\n\n \n Jian, N.; Henderson, S. G.; and Hunter, S. R.\n\n\n \n\n\n\n In Tolk, A.; Diallo, D.; Ryzhov, I. O.; Yilmaz, L.; Buckley, S.; and Miller, J. A., editor(s), Proceedings of the 2014 Winter Simulation Conference, pages 3892–3903, Piscataway NJ, 2014. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Sequential paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 14 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{jiahenhun14,\n\tabstract = {Consider a real-valued function that can only be evaluated with error. Given estimates of the function values from simulation on a finite set of points, we seek a procedure to detect convexity or non-convexity of the true function restricted to those points. We review an existing frequentist hypothesis test, and introduce a sequential Bayesian procedure. Our Bayesian procedure applies for both independent sampling and sampling with common random numbers, with known or unknown sampling variance. In each iteration, we collect a set of samples and update a posterior distribution on the function values, and use that as the prior belief in our next iteration. We then approximate the probability that the function is convex based on the posterior using Monte Carlo simulation.\n},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Nanjing Jian and Shane G. Henderson and Susan R. Hunter},\n\tbooktitle = {Proceedings of the 2014 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {A. Tolk and D. Diallo and I. O. Ryzhov and L. Yilmaz and S. Buckley and J. A. Miller},\n\torganization = {IEEE},\n\tpages = {3892--3903},\n\ttitle = {Sequential detection of convexity from noisy function evaluations},\n\turl_paper = {pubs/wsc14convexity.pdf},\n\tyear = {2014}}\n\n
\n
\n\n\n
\n Consider a real-valued function that can only be evaluated with error. Given estimates of the function values from simulation on a finite set of points, we seek a procedure to detect convexity or non-convexity of the true function restricted to those points. We review an existing frequentist hypothesis test, and introduce a sequential Bayesian procedure. Our Bayesian procedure applies for both independent sampling and sampling with common random numbers, with known or unknown sampling variance. In each iteration, we collect a set of samples and update a posterior distribution on the function values, and use that as the prior belief in our next iteration. We then approximate the probability that the function is convex based on the posterior using Monte Carlo simulation. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A novel application to optimize aircraft utilization for non-urgent air transfers.\n \n \n \n \n\n\n \n MacDonald, R. D.; Ahghari, M.; Walker, L.; Carnes, T. A.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n Air Medical Journal, 33(1): 34–39. 2014.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{macetal14,\n\tabstract = {Introduction\nAir ambulances provide patients with timely access to referral centers. Non-emergent transfers are planned for efficient aircraft use. This study compares a novel flight planning optimization application to traditional planning methods.\n\nMethods\nThis prospective study compared real-world use of the application to traditional methods in a large air medical system. Each day was randomized to application use or manual methods. Descriptive statistics compared the resulting schedules through ratios of distance flown and cost to minimum distance required.\n\nResults\nManual methods were used on 33 days to plan 479 requests, yielding 181 flights, 856 flying hours and 289,627 kilometers (km) flown. Ratios of distance flown and cost were 1.47 km flown and \\$4.98 per km required. The application was used on 25 days to plan 360 requests, yielding 146 flights, 639 flying hours and 216,944 km flown. The corresponding ratios were 1.4 km flown and \\$4.65 per km required. Average distance flown per distance required decreased by 5\\% (p=0.07), and average cost per average required distance decreased by 7\\% (p=0.03) when using the application.\n\nConclusions\nProspective, real-world use of the application results in efficiencies when planning non-urgent patient transfers. Additional savings may be possible through further application refinements.},\n\tauthor = {Russell D. MacDonald and Mahvareh Ahghari and L. Walker and Tim A. Carnes and Shane G. Henderson and David B. Shmoys},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Air Medical Journal},\n\tnumber = {1},\n\tpages = {34--39},\n\ttitle = {A novel application to optimize aircraft utilization for non-urgent air transfers},\n\turl_paper = {pubs/Ornge-LTP.pdf},\n\tvolume = {33},\n\tyear = {2014}}\n\n
\n
\n\n\n
\n Introduction Air ambulances provide patients with timely access to referral centers. Non-emergent transfers are planned for efficient aircraft use. This study compares a novel flight planning optimization application to traditional planning methods. Methods This prospective study compared real-world use of the application to traditional methods in a large air medical system. Each day was randomized to application use or manual methods. Descriptive statistics compared the resulting schedules through ratios of distance flown and cost to minimum distance required. Results Manual methods were used on 33 days to plan 479 requests, yielding 181 flights, 856 flying hours and 289,627 kilometers (km) flown. Ratios of distance flown and cost were 1.47 km flown and $4.98 per km required. The application was used on 25 days to plan 360 requests, yielding 146 flights, 639 flying hours and 216,944 km flown. The corresponding ratios were 1.4 km flown and $4.65 per km required. Average distance flown per distance required decreased by 5% (p=0.07), and average cost per average required distance decreased by 7% (p=0.03) when using the application. Conclusions Prospective, real-world use of the application results in efficiencies when planning non-urgent patient transfers. Additional savings may be possible through further application refinements.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A bound on the performance of an optimal ambulance redeployment policy.\n \n \n \n \n\n\n \n Maxwell, M. S.; Ni, E. C.; Tong, C.; Hunter, S. R.; Henderson, S. G.; and Topaloglu, H.\n\n\n \n\n\n\n Operations Research, 62(5): 1014–1027. 2014.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n \n \"A slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 41 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{maxetal14,\n\tabstract = {Ambulance redeployment is the practice of repositioning ambulance fleets in real time in an attempt to reduce response times to future calls. When redeployment decisions are based on real-time information on the status and location of ambulances, the process is called system-status management. An important performance measure is the long-run fraction of calls with response times over some time threshold. We construct a lower bound on this performance measure that holds for nearly any ambulance redeployment policy through comparison methods for queues. The computation of the bound involves solving a number of integer programs and then simulating a multiserver queue. This work originated when one of the authors was asked to analyze a response to a request-for-proposals for ambulance services in a county in North America.},\n\tauthor = {Matthew S. Maxwell and Eric Cao Ni and Chaoxu Tong and Susan R. Hunter and Shane G. Henderson and Huseyin Topaloglu},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Operations Research},\n\tnumber = {5},\n\tpages = {1014--1027},\n\ttitle = {A bound on the performance of an optimal ambulance redeployment policy},\n\turl_paper = {pubs/BoundRedeploy.pdf},\n\turl_slides = {presentations/AmbDeploy.pdf},\n\tvolume = {62},\n\tyear = {2014}}\n\n
\n
\n\n\n
\n Ambulance redeployment is the practice of repositioning ambulance fleets in real time in an attempt to reduce response times to future calls. When redeployment decisions are based on real-time information on the status and location of ambulances, the process is called system-status management. An important performance measure is the long-run fraction of calls with response times over some time threshold. We construct a lower bound on this performance measure that holds for nearly any ambulance redeployment policy through comparison methods for queues. The computation of the bound involves solving a number of integer programs and then simulating a multiserver queue. This work originated when one of the authors was asked to analyze a response to a request-for-proposals for ambulance services in a county in North America.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Simulation optimization: A panel on the state of the art in research and practice.\n \n \n \n \n\n\n \n Fu, M. C.; Bayraksan, G.; Henderson, S. G.; Nelson, B. L.; Powell, W. B.; Ryzhov, I. O.; and Thengvall, B.\n\n\n \n\n\n\n In Tolk, A.; Diallo, D.; Ryzhov, I. O.; Yilmaz, L.; Buckley, S.; and Miller, J. A., editor(s), Proceedings of the 2014 Winter Simulation Conference, pages 3696–3706, Piscataway NJ, 2014. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Simulation paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{simoptpanel14,\n\tabstract = {The goal of this panel was to discuss the state of the art in simulation optimization research and practice. The participants included representation from both academia and industry, where the latter was represented by participation from a leading software provider of optimization tools for simulation. This paper begins with a short introduction to simulation optimization, and then presents a list of specific questions that served as a basis for discussion during the panel discussion. Each of the panelists was given an opportunity to provide their preliminary thoughts on simulation optimization for this paper, and the remainder of the paper summarizes those points, ranging from assessments of the field from their perspective to initial reactions to some of the posed questions. Finally, one of the panelists who has worked on an online testbed of simulation optimization problems for the research community was asked to provide an update of the status of the Web site.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Michael C. Fu and G{\\"u}zin Bayraksan and Shane G. Henderson and Barry L. Nelson and Warren B. Powell and Ilya O. Ryzhov and Ben Thengvall},\n\tbooktitle = {Proceedings of the 2014 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 18:11:38 +0000},\n\teditor = {A. Tolk and D. Diallo and I. O. Ryzhov and L. Yilmaz and S. Buckley and J. A. Miller},\n\torganization = {IEEE},\n\tpages = {3696--3706},\n\ttitle = {Simulation optimization: A panel on the state of the art in research and practice},\n\turl_paper = {pubs/wsc14panel.pdf},\n\tyear = {2014}}\n\n
\n
\n\n\n
\n The goal of this panel was to discuss the state of the art in simulation optimization research and practice. The participants included representation from both academia and industry, where the latter was represented by participation from a leading software provider of optimization tools for simulation. This paper begins with a short introduction to simulation optimization, and then presents a list of specific questions that served as a basis for discussion during the panel discussion. Each of the panelists was given an opportunity to provide their preliminary thoughts on simulation optimization for this paper, and the remainder of the paper summarizes those points, ranging from assessments of the field from their perspective to initial reactions to some of the posed questions. Finally, one of the panelists who has worked on an online testbed of simulation optimization problems for the research community was asked to provide an update of the status of the Web site.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2013\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Mathematical Programming Guides Air-Ambulance Routing at Ornge.\n \n \n \n \n\n\n \n Carnes, T. A.; Henderson, S. G.; Shmoys, D. B.; Ahghari, M.; and MacDonald, R. D.\n\n\n \n\n\n\n Interfaces, 43: 232–239. 2013.\n \n\n\n\n
\n\n\n\n \n \n \"Mathematical paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 6 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{caretal13,\n\tabstract = {Ornge provides air-ambulance services to the Province of Ontario. A major portion of their service involves pre-scheduled transports from one medical facility to another. These transports almost exclusively require fixed-wing aircraft due to the distances involved and cost considerations. The requests are received in advance, scheduled overnight, and typically executed the following day. We describe our work in developing a planning tool which determines an assignment of requests to aircraft that minimizes cost, subject to a range of complicating constraints. The tool is in use by flight planners every day at Ornge, and has resulted in substantial savings relative to the previous manual approach to devising schedules. We describe the problem, our formulation, its implementation, and the impact on operations at Ornge.},\n\tauthor = {Timothy A. Carnes and Shane G. Henderson and David B. Shmoys and Mahvareh Ahghari and Russell D. MacDonald},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Interfaces},\n\tpages = {232--239},\n\ttitle = {Mathematical Programming Guides Air-Ambulance Routing at {Ornge}},\n\turl_paper = {pubs/ornge-interfaces.pdf},\n\tvolume = {43},\n\tyear = {2013}}\n\n
\n
\n\n\n
\n Ornge provides air-ambulance services to the Province of Ontario. A major portion of their service involves pre-scheduled transports from one medical facility to another. These transports almost exclusively require fixed-wing aircraft due to the distances involved and cost considerations. The requests are received in advance, scheduled overnight, and typically executed the following day. We describe our work in developing a planning tool which determines an assignment of requests to aircraft that minimizes cost, subject to a range of complicating constraints. The tool is in use by flight planners every day at Ornge, and has resulted in substantial savings relative to the previous manual approach to devising schedules. We describe the problem, our formulation, its implementation, and the impact on operations at Ornge.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimal budget allocation in the evaluation of simulation-optimization algorithms.\n \n \n \n \n\n\n \n Guo, E. A.; and Henderson, S. G.\n\n\n \n\n\n\n 2013.\n Unpublished Manuscript\n\n\n\n
\n\n\n\n \n \n \"Optimal paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{guohen11,\n\tabstract = {To efficiently evaluate simulation-optimization algorithms, we propose three different performance measures and their respective estimators. Only one estimator achieves the canonical Monte Carlo convergence rate $O(T^{−1/2})$, while the other two converge at the sub-canonical rate of $O(T^{−1/3})$. For each estimator, we study how the computational budget should be allocated between the execution of the optimization algorithm and the assessment of the output, so that the mean squared error of the estimator is minimized.},\n\tauthor = {Eric Anjie Guo and Shane G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tnote = {Unpublished Manuscript},\n\ttitle = {Optimal budget allocation in the evaluation of simulation-optimization algorithms},\n\turl_paper = {pubs/guohen13.pdf},\n\tyear = {2013}}\n\n
\n
\n\n\n
\n To efficiently evaluate simulation-optimization algorithms, we propose three different performance measures and their respective estimators. Only one estimator achieves the canonical Monte Carlo convergence rate $O(T^{−1/2})$, while the other two converge at the sub-canonical rate of $O(T^{−1/3})$. For each estimator, we study how the computational budget should be allocated between the execution of the optimization algorithm and the assessment of the output, so that the mean squared error of the estimator is minimized.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tuning approximate dynamic programming policies for ambulance redeployment via direct search.\n \n \n \n \n\n\n \n Maxwell, M. S.; Henderson, S. G.; and Topaloglu, H.\n\n\n \n\n\n\n Stochastic Systems, 3(2): 322-361. 2013.\n \n\n\n\n
\n\n\n\n \n \n \"Tuning paper\n  \n \n \n \"Tuning slides\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{maxhentop11,\n\tabstract = {In this paper we consider approximate dynamic programming methods for ambulance redeployment. We first demonstrate through simple examples how typical value function fitting techniques, such as approximate policy iteration and linear programming, may not be able to locate a high-quality policy even when the value function ap- proximation architecture is rich enough to provide the optimal policy. To make up for this potential shortcoming, we show how to use direct search methods to tune the parameters in a value function approxima- tion architecture so as to obtain high-quality policies. Direct search is computationally intensive. We therefore use a post-decision state dynamic programming formulation of ambulance redeployment that, together with direct search, requires far less computation with no noticeable performance loss. We provide further theoretical support for the post-decision state formulation of the ambulance-deployment problem by showing that this formulation can be obtained through a limiting argument on the original dynamic programming formulation.},\n\tauthor = {Matthew S. Maxwell and Shane G. Henderson and Huseyin Topaloglu},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tdoi = {10.1214/10-SSY020},\n\tfjournal = {Stochastic Systems},\n\tissn = {1946-5238},\n\tjournal = {Stochastic Systems},\n\tnumber = {2},\n\tpages = {322-361},\n\tsici = {1946-5238(2013)3:2<322:TADPPF>2.0.CO;2-E},\n\ttitle = {Tuning approximate dynamic programming policies for ambulance redeployment via direct search},\n\turl_paper = {pubs/SSYTuningADP.pdf},\n\turl_slides = {presentations/AmbDeploy.pdf},\n\tvolume = {3},\n\tyear = {2013},\n\tbdsk-url-1 = {http://dx.doi.org/10.1214/10-SSY020}}\n\n
\n
\n\n\n
\n In this paper we consider approximate dynamic programming methods for ambulance redeployment. We first demonstrate through simple examples how typical value function fitting techniques, such as approximate policy iteration and linear programming, may not be able to locate a high-quality policy even when the value function ap- proximation architecture is rich enough to provide the optimal policy. To make up for this potential shortcoming, we show how to use direct search methods to tune the parameters in a value function approxima- tion architecture so as to obtain high-quality policies. Direct search is computationally intensive. We therefore use a post-decision state dynamic programming formulation of ambulance redeployment that, together with direct search, requires far less computation with no noticeable performance loss. We provide further theoretical support for the post-decision state formulation of the ambulance-deployment problem by showing that this formulation can be obtained through a limiting argument on the original dynamic programming formulation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ranking and selection in a high performance computing environment.\n \n \n \n \n\n\n \n Ni, E. C.; Hunter, S. R.; and Henderson, S. G.\n\n\n \n\n\n\n In Pasupathy, R.; Kim, S.; Tolk, A.; Hill, R.; and Kuhl, M. E., editor(s), Proceedings of the 2013 Winter Simulation Conference, pages 833–845, Piscataway NJ, 2013. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Ranking paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 55 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{nietal13,\n\tabstract = {We explore the adaptation of a ranking and selection procedure, originally designed for a sequential computer, to a high-performance (parallel) computing setting. We pay particular attention to screening and explaining why care is required in implementing screening in parallel settings. We develop an algorithm that allows screening at both the master and worker levels, and that apportions work to processors in such a way that excessive communication is avoided. In doing so we rely on a random number generator with many streams and substreams.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Eric Cao Ni and Susan R. Hunter and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2013 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {R. Pasupathy and S.-H. Kim and A. Tolk and R. Hill and M. E. Kuhl},\n\torganization = {IEEE},\n\tpages = {833--845},\n\ttitle = {Ranking and selection in a high performance computing environment},\n\turl_paper = {pubs/wsc13.pdf},\n\tyear = {2013}}\n\n
\n
\n\n\n
\n We explore the adaptation of a ranking and selection procedure, originally designed for a sequential computer, to a high-performance (parallel) computing setting. We pay particular attention to screening and explaining why care is required in implementing screening in parallel settings. We develop an algorithm that allows screening at both the master and worker levels, and that apportions work to processors in such a way that excessive communication is avoided. In doing so we rely on a random number generator with many streams and substreams.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Bisection search with noisy responses.\n \n \n \n \n\n\n \n Waeber, R.; Frazier, P. I.; and Henderson, S. G.\n\n\n \n\n\n\n SIAM Journal on Control and Optimization, 51(3): 2261–2279. 2013.\n \n\n\n\n
\n\n\n\n \n \n \"Bisection paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{waefrahen13,\n\tabstract = {Bisection search is the most efficient algorithm for locating a unique point X* in [0, 1] when we are able to query an oracle only about whether X* lies to the left or right of a point x of our choosing. We study a noisy version of this classic problem, where the oracle's response is correct only with probability p. The probabilistic bisection algorithm (PBA) introduced by Horstein [IEEE Trans. Inform. Theory, 9 (1963), pp. 136--143] can be used to locate X* in this setting. While the method works extremely well in practice, very little is known about its theoretical properties. In this paper, we provide several key findings about the PBA, which lead to the main conclusion that the expected absolute residuals of successive search results, i.e., E[|X* − Xn|], converge to 0 at a geometric rate.},\n\tauthor = {Rolf Waeber and Peter I. Frazier and Shane G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {{SIAM} Journal on Control and Optimization},\n\tnumber = {3},\n\tpages = {2261--2279},\n\ttitle = {Bisection search with noisy responses},\n\turl_paper = {pubs/BisectionSearch.pdf},\n\tvolume = {51},\n\tyear = {2013}}\n\n
\n
\n\n\n
\n Bisection search is the most efficient algorithm for locating a unique point X* in [0, 1] when we are able to query an oracle only about whether X* lies to the left or right of a point x of our choosing. We study a noisy version of this classic problem, where the oracle's response is correct only with probability p. The probabilistic bisection algorithm (PBA) introduced by Horstein [IEEE Trans. Inform. Theory, 9 (1963), pp. 136–143] can be used to locate X* in this setting. While the method works extremely well in practice, very little is known about its theoretical properties. In this paper, we provide several key findings about the PBA, which lead to the main conclusion that the expected absolute residuals of successive search results, i.e., E[|X* − Xn|], converge to 0 at a geometric rate.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Travel time estimation for ambulances using Bayesian data augmentation.\n \n \n \n \n\n\n \n Westgate, B. S.; Woodard, D. B.; Matteson, D. S.; and Henderson, S. G.\n\n\n \n\n\n\n Ann. Appl. Statist., 7(2): 1139-1161. 2013.\n \n\n\n\n
\n\n\n\n \n \n \"Travel paper\n  \n \n \n \"Travel supplement\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{wesetal13,\n\tabstract = {We introduce a Bayesian model for estimating the distribution of ambulance travel times on each road segment in a city, using Global Positioning System (GPS) data. Due to sparseness and error in the GPS data, the exact ambulance paths and travel times on each road segment are unknown. We simultaneously estimate the paths, travel times, and parameters of each road segment travel time distribution using Bayesian data augmentation. To draw ambulance path samples, we use a novel reversible jump Metropolis-Hastings step. We also introduce two simpler estimation methods based on GPS speed data.\n\nWe compare these methods to a recently-published travel time estimation method, using simulated data and data from Toronto EMS. In both cases, out-of-sample point and interval estimates of ambulance trip times from the Bayesian method outperform estimates from the alternative methods. We also construct probability-of-coverage maps for ambulances. The Bayesian method gives more realistic maps than the recently-published method. Finally, path estimates from the Bayesian method interpolate well between sparsely recorded GPS readings and are robust to GPS location errors.},\n\tarxiv = {1312.1873},\n\tauthor = {Bradford S. Westgate and Dawn B. Woodard and David S. Matteson and Shane G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tdoi = {10.1214/13-AOAS626},\n\tfjournal = {Annals of Applied Statistics},\n\tissn = {1932-6157},\n\tjournal = {Ann. Appl. Statist.},\n\tnumber = {2},\n\tpages = {1139-1161},\n\tsici = {1932-6157(2013)7:2<1139:TTEFAU>2.0.CO;2-I},\n\ttitle = {Travel time estimation for ambulances using Bayesian data augmentation},\n\turl_paper = {pubs/TravelTime.pdf},\n\turl_supplement = {pubs/TravelTimeSupplement},\n\tvolume = {7},\n\tyear = {2013},\n\tbdsk-url-1 = {http://dx.doi.org/10.1214/13-AOAS626}}\n\n
\n
\n\n\n
\n We introduce a Bayesian model for estimating the distribution of ambulance travel times on each road segment in a city, using Global Positioning System (GPS) data. Due to sparseness and error in the GPS data, the exact ambulance paths and travel times on each road segment are unknown. We simultaneously estimate the paths, travel times, and parameters of each road segment travel time distribution using Bayesian data augmentation. To draw ambulance path samples, we use a novel reversible jump Metropolis-Hastings step. We also introduce two simpler estimation methods based on GPS speed data. We compare these methods to a recently-published travel time estimation method, using simulated data and data from Toronto EMS. In both cases, out-of-sample point and interval estimates of ambulance trip times from the Bayesian method outperform estimates from the alternative methods. We also construct probability-of-coverage maps for ambulances. The Bayesian method gives more realistic maps than the recently-published method. Finally, path estimates from the Bayesian method interpolate well between sparsely recorded GPS readings and are robust to GPS location errors.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2012\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Allocating capacity in parallel queues to improve their resilience to deliberate attack.\n \n \n \n \n\n\n \n Carlye, W. M.; Henderson, S. G.; and Szechtman, R.\n\n\n \n\n\n\n Naval Research Logistics, 58(8): 731–742. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"Allocating paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{carhensze11,\n\tabstract = {We develop models that lend insight into how to design systems that enjoy economies of scale in their operating costs, when those systems will subsequently face disruptions from accidents, acts of nature, or an intentional attack from a well-informed attacker. The systems are modeled as parallel M/M/1 queues, and the key question is how to allocate service capacity among the queues to make the system resilient to worst-case disruptions. We formulate this problem as a three-level sequential game of perfect information between a defender and a hypothetical attacker. The optimal allocation of service capacity to queues depends on the type of attack one is facing. We distinguish between deterministic incremental attacks, where some, but not all, of the capacity of each attacked queue is knocked out, and zero-one random-outcome (ZORO) attacks, where the outcome is random and either all capacity at an attacked queue is knocked out or none is. There are differences in the way one should design systems in the face of incremental or ZORO attacks. For incremental attacks it is best to concentrate capacity. For ZORO attacks the optimal allocation is more complex, typically, but not always, involving spreading the service capacity out somewhat among the servers.\n},\n\tauthor = {W. Matthew Carlye and Shane G. Henderson and Roberto Szechtman},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Naval Research Logistics},\n\tnumber = {8},\n\tpages = {731--742},\n\ttitle = {Allocating capacity in parallel queues to improve their resilience to deliberate attack},\n\turl_paper = {pubs/AttackQueues.pdf},\n\tvolume = {58},\n\tyear = {2012}}\n\n
\n
\n\n\n
\n We develop models that lend insight into how to design systems that enjoy economies of scale in their operating costs, when those systems will subsequently face disruptions from accidents, acts of nature, or an intentional attack from a well-informed attacker. The systems are modeled as parallel M/M/1 queues, and the key question is how to allocate service capacity among the queues to make the system resilient to worst-case disruptions. We formulate this problem as a three-level sequential game of perfect information between a defender and a hypothetical attacker. The optimal allocation of service capacity to queues depends on the type of attack one is facing. We distinguish between deterministic incremental attacks, where some, but not all, of the capacity of each attacked queue is knocked out, and zero-one random-outcome (ZORO) attacks, where the outcome is random and either all capacity at an attacked queue is knocked out or none is. There are differences in the way one should design systems in the face of incremental or ZORO attacks. For incremental attacks it is best to concentrate capacity. For ZORO attacks the optimal allocation is more complex, typically, but not always, involving spreading the service capacity out somewhat among the servers. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Sharpening comparisons via Gaussian copulas and semidefinite programming.\n \n \n \n \n\n\n \n Henderson, S. G.; and Ehrlichman, S. M. T.\n\n\n \n\n\n\n ACM Transactions on Modeling and Computer Simulation, 22(4): Article 22. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"Sharpening paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{henehr12,\n\tabstract = {A common problem in operations research involves comparing two system designs through simulation of both systems. The comparison can often be made more accurate through careful control (coupling) of the random numbers that are used in simulating each system, with common random numbers being the standard example. We describe a new approach for coupling the random-number inputs to two systems that involves generating realizations of a Gaussian random vector and then transforming the Gaussian random vector into the desired random-number inputs. We use nonlinear semidefinite programming to select the correlation matrix of the Gaussian random vector, with the goal of sharpening the comparison.},\n\tannote = {pubs/sharpening.pdf},\n\tauthor = {Shane G. Henderson and Samuel M. T. Ehrlichman},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2018-07-16 17:44:22 +0000},\n\tjournal = {ACM Transactions on Modeling and Computer Simulation},\n\tnumber = {4},\n\tpages = {Article 22},\n\ttitle = {Sharpening comparisons via {G}aussian copulas and semidefinite programming},\n\turl_paper = {https://dl.acm.org/authorize?N654064},\n\tvolume = {22},\n\tyear = {2012}}\n\n
\n
\n\n\n
\n A common problem in operations research involves comparing two system designs through simulation of both systems. The comparison can often be made more accurate through careful control (coupling) of the random numbers that are used in simulating each system, with common random numbers being the standard example. We describe a new approach for coupling the random-number inputs to two systems that involves generating realizations of a Gaussian random vector and then transforming the Gaussian random vector into the desired random-number inputs. We use nonlinear semidefinite programming to select the correlation matrix of the Gaussian random vector, with the goal of sharpening the comparison.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Tutorial: Teaching an advanced simulation topic.\n \n \n \n \n\n\n \n Henderson, S. G.; Jacobson, S.; and Robinson, S.\n\n\n \n\n\n\n In Laroque, C.; Himmelspach, J.; Pasupathy, R.; Rose, O.; and Uhrmacher, A. M., editor(s), Proceedings of the 2012 Winter Simulation Conference, pages 13–21, Piscataway NJ, 2012. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Tutorial: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{henjacrob12,\n\tabstract = {In this tutorial three experienced instructors describe how they teach an advanced simulation topic: Poisson processes (Henderson), variance reduction (Jacobson), and verification and validation (Robinson).},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Shane G. Henderson and Sheldon Jacobson and Stewart Robinson},\n\tbooktitle = {Proceedings of the 2012 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {C. Laroque and J. Himmelspach and R. Pasupathy and O. Rose and A. M. Uhrmacher},\n\torganization = {IEEE},\n\tpages = {13--21},\n\ttitle = {Tutorial: Teaching an advanced simulation topic},\n\turl_paper = {pubs/wsc12Teach.pdf},\n\tyear = {2012}}\n\n
\n
\n\n\n
\n In this tutorial three experienced instructors describe how they teach an advanced simulation topic: Poisson processes (Henderson), variance reduction (Jacobson), and verification and validation (Robinson).\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Prospective, real-time use of an optimization application for non-urgent patient transfers using fixed wing aircraft.\n \n \n \n \n\n\n \n MacDonald, R. D.; Walker, L.; Ahghari, M.; Carnes, T. A.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n Air Medical Journal, 31(5): 230. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"Prospective, paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{macetal12,\n\tabstract = {Aircraft are used for interfacility patient transfers to provide timely transfer to referral centers. Some transfers are non-emergent and can be scheduled and routed for efficient aircraft use. Commercial airlines have comprehensive programs and infrastructure to ensure optimized flight\nschedules and aircraft utilization.\nAir medical services operate on a smaller scale, do not operate on a fixed schedule, and cannot\npredict demand or utilization due to the nature of their service.\nHowever, air ambulance services have the same need to optimize utilization to minimize cost\nwhile meeting demand.\nOptimized deployment of air medical resources enhance access while decreasing cost.},\n\tauthor = {Russell D. MacDonald and L. Walker and Mahvareh Ahghari and Timothy A. Carnes and Shane G. Henderson and David B. Shmoys},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Air Medical Journal},\n\tnumber = {5},\n\tpages = {230},\n\ttitle = {Prospective, real-time use of an optimization application for non-urgent patient transfers using fixed wing aircraft},\n\turl_paper = {pubs/LTP2012poster.pdf},\n\tvolume = {31},\n\tyear = {2012}}\n\n
\n
\n\n\n
\n Aircraft are used for interfacility patient transfers to provide timely transfer to referral centers. Some transfers are non-emergent and can be scheduled and routed for efficient aircraft use. Commercial airlines have comprehensive programs and infrastructure to ensure optimized flight schedules and aircraft utilization. Air medical services operate on a smaller scale, do not operate on a fixed schedule, and cannot predict demand or utilization due to the nature of their service. However, air ambulance services have the same need to optimize utilization to minimize cost while meeting demand. Optimized deployment of air medical resources enhance access while decreasing cost.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Exploring bounds on ambulance deployment policy performance.\n \n \n \n \n\n\n \n Ni, E. C.; Hunter, S. R.; Henderson, S. G.; and Topaloglu, H.\n\n\n \n\n\n\n In Laroque, C.; Himmelspach, J.; Pasupathy, R.; Rose, O.; and Uhrmacher, A. M., editor(s), Proceedings of the 2012 Winter Simulation Conference, pages 497–508, 2012. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Exploring paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 18 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{nietal12,\n\tabstract = {Ambulance deployment involves controlling a fleet of ambulances, often in real time, in an attempt to keep response times small. Simulation has been used to devise redeployment policies, and bounds have been obtained from a combination of comparison methods for queues (coupling) and simulation. These techniques yield varying results on two realistic examples. In an attempt to understand the varying results, we explore the performance of the policies and bounds on artificial models.},\n\tauthor = {Eric Cao Ni and Susan R. Hunter and Shane G. Henderson and Huseyin Topaloglu},\n\tbooktitle = {Proceedings of the 2012 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {C. Laroque and J. Himmelspach and R. Pasupathy and O. Rose and A. M. Uhrmacher},\n\torganization = {IEEE},\n\tpages = {497--508},\n\ttitle = {Exploring bounds on ambulance deployment policy performance},\n\turl_paper = {pubs/wsc12ExploreBounds.pdf},\n\tyear = {2012}}\n\n
\n
\n\n\n
\n Ambulance deployment involves controlling a fleet of ambulances, often in real time, in an attempt to keep response times small. Simulation has been used to devise redeployment policies, and bounds have been obtained from a combination of comparison methods for queues (coupling) and simulation. These techniques yield varying results on two realistic examples. In an attempt to understand the varying results, we explore the performance of the policies and bounds on artificial models.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Simulating multivariate nonhomogeneous Poisson processes using projections.\n \n \n \n \n\n\n \n Saltzman, E. A.; Drew, J. H.; Leemis, L. M.; and Henderson, S. G.\n\n\n \n\n\n\n ACM Transactions on Modeling and Computer Simulation, 22(3): Article 15. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"Simulating paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{saletal11,\n\tabstract = {Established techniques for generating an instance of a multivariate nonhomogeneous Poisson process (NHPP) such as thinning can become highly inefficient as the dimensionality of the process is increased, particularly if the defining intensity (or rate) function has a pronounced peak. To overcome this inefficiency, we propose an alternative approach where one first generates a projection of the NHPP onto a lower-dimensional space, and then extends the generated points to points in the original space by generating from appropriate conditional distributions. One version of this algorithm replaces a high-dimensional problem with a series of one-dimensional problems. Several examples are presented.},\n\tannote = {pubs/NHPPprojection.pdf},\n\tauthor = {Evan A. Saltzman and John H. Drew and Lawrence M. Leemis and Shane G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2018-07-16 17:46:17 +0000},\n\tjournal = {ACM Transactions on Modeling and Computer Simulation},\n\tnumber = {3},\n\tpages = {Article 15},\n\ttitle = {Simulating multivariate nonhomogeneous {P}oisson processes using projections},\n\turl_paper = {https://dl.acm.org/authorize?N654076},\n\tvolume = {22},\n\tyear = {2012}}\n\n
\n
\n\n\n
\n Established techniques for generating an instance of a multivariate nonhomogeneous Poisson process (NHPP) such as thinning can become highly inefficient as the dimensionality of the process is increased, particularly if the defining intensity (or rate) function has a pronounced peak. To overcome this inefficiency, we propose an alternative approach where one first generates a projection of the NHPP onto a lower-dimensional space, and then extends the generated points to points in the original space by generating from appropriate conditional distributions. One version of this algorithm replaces a high-dimensional problem with a series of one-dimensional problems. Several examples are presented.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A framework for selecting a selection procedure.\n \n \n \n \n\n\n \n Waeber, R.; Frazier, P. I.; and Henderson, S. G.\n\n\n \n\n\n\n ACM Transactions on Modeling and Computer Simulation, 22(4): Article 16. 2012.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{waefrahen12,\n\tabstract = {For many discrete simulation optimization applications, it is often difficult to decide which Ranking and Selection (R\\&S) procedure to use. To efficiently compare R\\&S procedures, we present a three-layer performance evaluation process. We show that the two most popular performance formulations, namely the Bayesian formulation and the indifference zone formulation, have a common representation analogous to convex risk measures used in mathematical finance. We then specify how a decision maker can impose a performance requirement on R\\&S procedures that is more adequate for her risk attitude than the indifference zone or the Bayesian performance requirements. Such a performance requirement partitions the space of R\\&S procedures into acceptable and nonacceptable procedures. The minimal computational budget required for a procedure to become acceptable introduces an easy-to-interpret preference order on the set of R\\&S policies. We demonstrate with a numerical example how the introduced framework can be used to guide the choice of selection procedure in practice.},\n\tannote = {pubs/selectionframework.pdf},\n\tauthor = {Rolf Waeber and Peter I. Frazier and Shane G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2018-07-16 17:45:33 +0000},\n\tjournal = {ACM Transactions on Modeling and Computer Simulation},\n\tnumber = {4},\n\tpages = {Article 16},\n\ttitle = {A framework for selecting a selection procedure},\n\turl_paper = {https://dl.acm.org/authorize?N654075},\n\tvolume = {22},\n\tyear = {2012}}\n\n
\n
\n\n\n
\n For many discrete simulation optimization applications, it is often difficult to decide which Ranking and Selection (R&S) procedure to use. To efficiently compare R&S procedures, we present a three-layer performance evaluation process. We show that the two most popular performance formulations, namely the Bayesian formulation and the indifference zone formulation, have a common representation analogous to convex risk measures used in mathematical finance. We then specify how a decision maker can impose a performance requirement on R&S procedures that is more adequate for her risk attitude than the indifference zone or the Bayesian performance requirements. Such a performance requirement partitions the space of R&S procedures into acceptable and nonacceptable procedures. The minimal computational budget required for a procedure to become acceptable introduces an easy-to-interpret preference order on the set of R&S policies. We demonstrate with a numerical example how the introduced framework can be used to guide the choice of selection procedure in practice.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2011\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Use of a novel application to optimize aircraft utilization for non-urgent patient transfers.\n \n \n \n \n\n\n \n MacDonald, R. D.; Ahghari, M.; Carnes, T. A.; Henderson, S. G.; and Shmoys, D. B.\n\n\n \n\n\n\n Air Medical Journal, 30(5): 255. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"Use paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{macetal11,\n\tabstract = {Aircraft are used for interfacility patient transfers to provide timely transfer to referral centers. Some transfers are non-emergent and can be scheduled and routed for efficient aircraft use. Commercial airlines have comprehensive programs and infrastructure to ensure optimized flight\nschedules and aircraft utilization.\nAir medical services operate on a smaller scale, do not operate on a fixed schedule, and cannot\npredict demand or utilization due to the nature of their service.\nHowever, air ambulance services have the same need to optimize utilization to minimize cost\nwhile meeting demand.\nOptimized deployment of air medical resources enhance access while decreasing cost.},\n\tauthor = {Russell D. MacDonald and Mahvareh Ahghari and Tim A. Carnes and Shane G. Henderson and David B. Shmoys},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Air Medical Journal},\n\tnumber = {5},\n\tpages = {255},\n\ttitle = {Use of a novel application to optimize aircraft utilization for non-urgent patient transfers},\n\turl_paper = {pubs/LTP2011poster.pdf},\n\tvolume = {30},\n\tyear = {2011}}\n\n
\n
\n\n\n
\n Aircraft are used for interfacility patient transfers to provide timely transfer to referral centers. Some transfers are non-emergent and can be scheduled and routed for efficient aircraft use. Commercial airlines have comprehensive programs and infrastructure to ensure optimized flight schedules and aircraft utilization. Air medical services operate on a smaller scale, do not operate on a fixed schedule, and cannot predict demand or utilization due to the nature of their service. However, air ambulance services have the same need to optimize utilization to minimize cost while meeting demand. Optimized deployment of air medical resources enhance access while decreasing cost.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Forecasting emergency medical service call arrival rates.\n \n \n \n \n\n\n \n Matteson, D. S.; McLean, M. W.; Woodard, D. B.; and Henderson, S. G.\n\n\n \n\n\n\n Annals of Applied Statistics, 5(2B): 1379–1406. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"Forecasting paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{matetal11,\n\tabstract = {We introduce a new method for forecasting emergency call arrival rates that combines integer-valued time series models with a dynamic latent factor structure. Covariate information is captured via simple constraints on the factor loadings. We directly model the count-valued arrivals per hour, rather than using an artificial assumption of normality. This is crucial for the emergency medical service context, in which the volume of calls may be very low. Smoothing splines are used in estimating the factor levels and loadings to im- prove long-term forecasts. We impose time series structure at the hourly level, rather than at the daily level, capturing the fine-scale dependence in addition to the long-term structure.\n\t\nOur analysis considers all emergency priority calls received by Toronto EMS between January 2007 and December 2008 for which an ambulance was dispatched. Empirical results demonstrate significantly reduced error in forecasting call arrival volume. To quantify the impact of reduced forecast errors, we design a queueing model simulation that approximates the dynamics of an ambulance system. The results show better performance as the forecasting method improves. This notion of quantifying the operational impact of improved statistical procedures may be of independent interest.},\n\tauthor = {D. S. Matteson and M. W. McLean and D. B. Woodard and S. G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Annals of Applied Statistics},\n\tnumber = {2B},\n\tpages = {1379--1406},\n\ttitle = {Forecasting emergency medical service call arrival rates},\n\turl_paper = {pubs/ForecastEMSTime.pdf},\n\tvolume = {5},\n\tyear = {2011}}\n\n
\n
\n\n\n
\n We introduce a new method for forecasting emergency call arrival rates that combines integer-valued time series models with a dynamic latent factor structure. Covariate information is captured via simple constraints on the factor loadings. We directly model the count-valued arrivals per hour, rather than using an artificial assumption of normality. This is crucial for the emergency medical service context, in which the volume of calls may be very low. Smoothing splines are used in estimating the factor levels and loadings to im- prove long-term forecasts. We impose time series structure at the hourly level, rather than at the daily level, capturing the fine-scale dependence in addition to the long-term structure. Our analysis considers all emergency priority calls received by Toronto EMS between January 2007 and December 2008 for which an ambulance was dispatched. Empirical results demonstrate significantly reduced error in forecasting call arrival volume. To quantify the impact of reduced forecast errors, we design a queueing model simulation that approximates the dynamics of an ambulance system. The results show better performance as the forecasting method improves. This notion of quantifying the operational impact of improved statistical procedures may be of independent interest.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SimOpt.\n \n \n \n \n\n\n \n Pasupathy, R.; and Henderson, S. G.\n\n\n \n\n\n\n 2011.\n \n\n\n\n
\n\n\n\n \n \n \"SimOptPaper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@misc{pashen11,\n\tauthor = {R. Pasupathy and S. G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tlastchecked = {6/27/12},\n\ttitle = {SimOpt},\n\turl = {http://www.simopt.org},\n\tyear = {2011},\n\tbdsk-url-1 = {http://www.simopt.org}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SimOpt : A library of simulation optimization problems.\n \n \n \n \n\n\n \n Pasupathy, R.; and Henderson, S. G.\n\n\n \n\n\n\n In Jain, S.; Creasey, R. R.; Himmelspach, J.; White, K. P.; and Fu, M., editor(s), Proceedings of the 2011 Winter Simulation Conference, pages 4080–4090, Piscataway NJ, 2011. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"SimOpt paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{pashen11b,\n\tabstract = {We present SimOpt --- a library of simulation-optimization problems intended to spur development and comparison of simulation-optimization methods and algorithms. The library currently has over 50 problems that are tagged by important problem attributes such as type of decision variables, and nature of constraints. Approximately half of the problems in the library come with a downloadable simulation oracle that follows a standardized calling protocol. We also propose the idea of problem and algorithm wrappers with a view toward facilitating assessment and comparison of simulation optimization algorithms.\n},\n\taddress = {Piscataway NJ},\n\tauthor = {R. Pasupathy and S. G. Henderson},\n\tbooktitle = {Proceedings of the 2011 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {S. Jain and R. R. Creasey and J. Himmelspach and K. P. White and M. Fu},\n\tpages = {4080--4090},\n\tpublisher = {IEEE},\n\ttitle = {SimOpt : A library of simulation optimization problems},\n\turl_paper = {pubs/wsc11SimOpt.pdf},\n\tyear = {2011}}\n\n
\n
\n\n\n
\n We present SimOpt — a library of simulation-optimization problems intended to spur development and comparison of simulation-optimization methods and algorithms. The library currently has over 50 problems that are tagged by important problem attributes such as type of decision variables, and nature of constraints. Approximately half of the problems in the library come with a downloadable simulation oracle that follows a standardized calling protocol. We also propose the idea of problem and algorithm wrappers with a view toward facilitating assessment and comparison of simulation optimization algorithms. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A Bayesian approach to stochastic root finding.\n \n \n \n \n\n\n \n Waeber, R.; Frazier, P. I.; and Henderson, S. G.\n\n\n \n\n\n\n In Jain, S.; Creasey, R. R.; Himmelspach, J.; White, K. P.; and Fu, M., editor(s), Proceedings of the 2011 Winter Simulation Conference, pages 4038–4050, Piscataway NJ, 2011. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{waefrahen11,\n\tabstract = {A stylized model of one-dimensional stochastic root-finding involves repeatedly querying an oracle as to whether the root lies to the left or right of a given point x. The oracle answers this question, but the received answer is incorrect with probability 1 − p(x). A Bayesian-style algorithm for this problem that assumes knowledge of p(.) repeatedly updates a density giving, in some sense, one's belief about the location of the root. We demonstrate how the algorithm works, and provide some results that shed light on its performance, both when p(.) is constant and when p(.) varies with x.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Rolf Waeber and Peter I. Frazier and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2011 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {S. Jain and R. R. Creasey and J. Himmelspach and K. P. White and M. Fu},\n\tpages = {4038--4050},\n\tpublisher = {IEEE},\n\ttitle = {A {B}ayesian approach to stochastic root finding},\n\turl_paper = {pubs/wsc11Waeber.pdf},\n\tyear = {2011}}\n\n
\n
\n\n\n
\n A stylized model of one-dimensional stochastic root-finding involves repeatedly querying an oracle as to whether the root lies to the left or right of a given point x. The oracle answers this question, but the received answer is incorrect with probability 1 − p(x). A Bayesian-style algorithm for this problem that assumes knowledge of p(.) repeatedly updates a density giving, in some sense, one's belief about the location of the root. We demonstrate how the algorithm works, and provide some results that shed light on its performance, both when p(.) is constant and when p(.) varies with x.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stationarity of generalized autoregressive moving average models.\n \n \n \n \n\n\n \n Woodard, D. B.; Matteson, D. S.; and Henderson, S. G.\n\n\n \n\n\n\n Electron. J. Statist., 5: 800-828. 2011.\n \n\n\n\n
\n\n\n\n \n \n \"Stationarity paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{woomathen11,\n\tabstract = {Time series models are often constructed by combining nonstationary effects such as trends with stochastic processes that are believed to be stationary. Although stationarity of the underlying process is typically crucial to ensure desirable properties or even validity of statistical estimators, there are numerous time series models for which this stationarity is not yet proven. A major barrier is that the most commonly-used methods assume phi-irreducibility, a condition that can be violated for the important class of discrete-valued observation-driven models.\n\t\nWe show (strict) stationarity for the class of Generalized Autoregressive Moving Average (GARMA) models, which provides a flexible analogue of ARMA models for count, binary, or other discrete-valued data. We do this from two perspectives. First, we show conditions under which GARMA models have a unique stationary distribution (so are strictly stationary when initialized in that distribution). This result potentially forms the foundation for broadly showing consistency and asymptotic normality of maximum likelihood estimators for GARMA models. Since these conclusions are not immediate, however, we also take a second approach. We show stationarity and ergodicity of a perturbed version of the GARMA model, which utilizes the fact that the perturbed model is phi-irreducible and immediately implies consistent estimation of the mean, lagged covariances, and other functionals of the perturbed process. We relate the perturbed and original processes by showing that the perturbed model yields parameter estimates that are arbitrarily close to those of the original model.},\n\tauthor = {Dawn B. Woodard and David S. Matteson and Shane G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tdoi = {10.1214/11-EJS627},\n\tfjournal = {Electronic Journal of Statistics},\n\tissn = {1935-7524},\n\tjournal = {Electron. J. 
Statist.},\n\tpages = {800-828},\n\tsici = {1935-7524(2011)5:0<800:SOGAMA>2.0.CO;2-7},\n\ttitle = {Stationarity of generalized autoregressive moving average models},\n\turl_paper = {pubs/GARMAstationary.pdf},\n\tvolume = {5},\n\tyear = {2011},\n\tbdsk-url-1 = {http://dx.doi.org/10.1214/11-EJS627}}\n\n
\n
\n\n\n
\n Time series models are often constructed by combining nonstationary effects such as trends with stochastic processes that are believed to be stationary. Although stationarity of the underlying process is typically crucial to ensure desirable properties or even validity of statistical estimators, there are numerous time series models for which this stationarity is not yet proven. A major barrier is that the most commonly-used methods assume phi-irreducibility, a condition that can be violated for the important class of discrete-valued observation-driven models. We show (strict) stationarity for the class of Generalized Autoregressive Moving Average (GARMA) models, which provides a flexible analogue of ARMA models for count, binary, or other discrete-valued data. We do this from two perspectives. First, we show conditions under which GARMA models have a unique stationary distribution (so are strictly stationary when initialized in that distribution). This result potentially forms the foundation for broadly showing consistency and asymptotic normality of maximum likelihood estimators for GARMA models. Since these conclusions are not immediate, however, we also take a second approach. We show stationarity and ergodicity of a perturbed version of the GARMA model, which utilizes the fact that the perturbed model is phi-irreducible and immediately implies consistent estimation of the mean, lagged covariances, and other functionals of the perturbed process. We relate the perturbed and original processes by showing that the perturbed model yields parameter estimates that are arbitrarily close to those of the original model.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2010\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Approximate dynamic programming for ambulance redeployment.\n \n \n \n \n\n\n \n Maxwell, M. S.; Restrepo, M.; Henderson, S. G.; and Topaloglu, H.\n\n\n \n\n\n\n INFORMS Journal on Computing, 22: 266–281. 2010.\n \n\n\n\n
\n\n\n\n \n \n \"ApproximatePaper\n  \n \n \n \"Approximate paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{maxetal10,\n\tabstract = {We present an approximate dynamic programming approach for making ambulance redeployment decisions in an emergency medical service system. The primary decision is where we should redeploy idle ambulances so as to maximize the number of calls reached within a delay threshold. We begin by formulating this problem as a dynamic program. To deal with the high-dimensional and uncountable state space in the dynamic program, we construct approximations to the value function that are parameterized by a small number of parameters. We tune the parameters using simulated cost trajectories of the system. Computational experi- ments demonstrate the performance of the approach on emergency medical service systems in two metropolitan areas. We report practically significant improvements in performance relative to benchmark static policies.},\n\tauthor = {M. S. Maxwell and M. Restrepo and S. G. Henderson and H. Topaloglu},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {INFORMS Journal on Computing},\n\tpages = {266--281},\n\ttitle = {Approximate dynamic programming for ambulance redeployment},\n\turl = {http://pubsonline.informs.org/doi/abs/10.1287/ijoc.1090.0345},\n\turl_paper = {pubs/ADPforAmb.pdf},\n\tvolume = {22},\n\tyear = {2010},\n\tbdsk-url-1 = {http://pubsonline.informs.org/doi/abs/10.1287/ijoc.1090.0345}}\n\n
\n
\n\n\n
\n We present an approximate dynamic programming approach for making ambulance redeployment decisions in an emergency medical service system. The primary decision is where we should redeploy idle ambulances so as to maximize the number of calls reached within a delay threshold. We begin by formulating this problem as a dynamic program. To deal with the high-dimensional and uncountable state space in the dynamic program, we construct approximations to the value function that are parameterized by a small number of parameters. We tune the parameters using simulated cost trajectories of the system. Computational experi- ments demonstrate the performance of the approach on emergency medical service systems in two metropolitan areas. We report practically significant improvements in performance relative to benchmark static policies.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Identifying effective policies in approximate dynamic programming: beyond regression.\n \n \n \n \n\n\n \n Maxwell, M. S.; Henderson, S. G.; and Topaloglu, H.\n\n\n \n\n\n\n In Johansson, B.; Jain, S.; Hugan, J.; and Yücesan, E., editor(s), Proceedings of the 2010 Winter Simulation Conference, pages 1079–1087, Piscataway NJ, 2010. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Identifying paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{maxhentop10,\n\tabstract = {Dynamic programming formulations may be used to solve for optimal policies in Markov decision processes. Due to computational complexity dynamic programs must often be solved approximately. We consider the case of a tunable approximation architecture used in lieu of computing true value functions. The standard methodology advocates tuning the approximation architecture via sample path information and regression to get a good fit to the true value function. We provide an example which shows that this approach may unnecessarily lead to poorly performing policies and suggest direct search methods to find better performing value function approximations. We illustrate this concept with an application from ambulance redeployment.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {M. S. Maxwell and S. G. Henderson and H. Topaloglu},\n\tbooktitle = {Proceedings of the 2010 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {B. Johansson and S. Jain and J. Hugan and E. Y{\\"{u}}cesan},\n\tpages = {1079--1087},\n\tpublisher = {IEEE},\n\ttitle = {Identifying effective policies in approximate dynamic programming: beyond regression},\n\turl_paper = {pubs/wsc10Maxwell.pdf},\n\tyear = {2010}}\n\n
\n
\n\n\n
\n Dynamic programming formulations may be used to solve for optimal policies in Markov decision processes. Due to computational complexity dynamic programs must often be solved approximately. We consider the case of a tunable approximation architecture used in lieu of computing true value functions. The standard methodology advocates tuning the approximation architecture via sample path information and regression to get a good fit to the true value function. We provide an example which shows that this approach may unnecessarily lead to poorly performing policies and suggest direct search methods to find better performing value function approximations. We illustrate this concept with an application from ambulance redeployment.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance measures for ranking and selection procedures.\n \n \n \n \n\n\n \n Waeber, R.; Frazier, P. I.; and Henderson, S. G.\n\n\n \n\n\n\n In Johansson, B.; Jain, S.; Hugan, J.; and Yücesan, E., editor(s), Proceedings of the 2010 Winter Simulation Conference, pages 1235–1245, Piscataway NJ, 2010. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Performance paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{waefrahen10,\n\tabstract = {To efficiently compare Ranking and Selection procedures, we present a three-layer performance evaluation process. The two most popular formulations, namely the Bayes and Indifference Zone formulations, have a common representation analogous to convex risk measures used in quantitative risk management. We study decision makers' acceptance sets via an axiomatic approach and introduce a new performance measure using computational cost.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {Rolf Waeber and Peter I. Frazier and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2010 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {B. Johansson and S. Jain and J. Hugan and E. Y{\\"{u}}cesan},\n\tpages = {1235--1245},\n\tpublisher = {IEEE},\n\ttitle = {Performance measures for ranking and selection procedures},\n\turl_paper = {pubs/wsc10Waeber.pdf},\n\tyear = {2010}}\n\n
\n
\n\n\n
\n To efficiently compare Ranking and Selection procedures, we present a three-layer performance evaluation process. The two most popular formulations, namely the Bayes and Indifference Zone formulations, have a common representation analogous to convex risk measures used in quantitative risk management. We study decision makers' acceptance sets via an axiomatic approach and introduce a new performance measure using computational cost.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2009\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Estimating the probability that the game of Monopoly never ends.\n \n \n \n \n\n\n \n Friedman, E. J.; Henderson, S. G.; Byuen, T.; and Gutiérrez Gallardo, G.\n\n\n \n\n\n\n In Rossetti, M. D.; Hill, R. R.; Johansson, B.; Dunkin, A.; and Ingalls, R. G., editor(s), Proceedings of the 2009 Winter Simulation Conference, pages 380–391, Piscataway NJ, 2009. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Estimating paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{frietal09,\n\tabstract = {We estimate the probability that the game of Monopoly between two players playing very simple strategies never ends. Four different estimators, based respectively on straightforward simulation, a Brownian motion approximation, asymptotics for Markov chains, and importance sampling all yield an estimate of approximately twelve percent.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {E. J. Friedman and S. G. Henderson and T. Byuen and G. {Guti\\'{e}rrez Gallardo}},\n\tbooktitle = {Proceedings of the 2009 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {M. D. Rossetti and R. R. Hill and B. Johansson and A. Dunkin and R. G. Ingalls},\n\tpages = {380--391},\n\tpublisher = {IEEE},\n\ttitle = {Estimating the probability that the game of {M}onopoly never ends},\n\turl_paper = {pubs/monopoly.pdf},\n\tyear = {2009}}\n\n
\n
\n\n\n
\n We estimate the probability that the game of Monopoly between two players playing very simple strategies never ends. Four different estimators, based respectively on straightforward simulation, a Brownian motion approximation, asymptotics for Markov chains, and importance sampling all yield an estimate of approximately twelve percent.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Patchwork Distributions.\n \n \n \n \n\n\n \n Ghosh, S.; and Henderson, S. G.\n\n\n \n\n\n\n In C. Alexopoulos, D. G.; and Wilson, J., editor(s), Advancing the Frontiers of Simulation: A Festschrift in Honor of George Samuel Fishman, volume 133, of International Series in Operations Research & Management Science, pages 65–86. Springer, 2009.\n \n\n\n\n
\n\n\n\n \n \n \"Patchwork paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{ghohen09,\n\tabstract = {Patchwork distributions are a class of distributions for use in simulation that can be used to model finite-dimensional random vectors with given marginal distributions and dependence properties. They are an extension of the previously developed chessboard distributions. We show how patchwork distributions can be selected to match several user-specified properties of the joint distribution. In constructing a patchwork distribution, one must solve a linear program that is potentially large. We develop results that shed light on the size of the linear program that one must solve. These results suggest that patchwork distributions should only be used to model random vectors with low dimension, say less than or equal to 5.},\n\tauthor = {S. Ghosh and S. G. Henderson},\n\tbooktitle = {Advancing the Frontiers of Simulation: A Festschrift in Honor of {G}eorge {S}amuel {F}ishman},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {C. Alexopoulos, D. Goldsman and J.R. Wilson},\n\tpages = {65--86},\n\tpublisher = {Springer},\n\tseries = {International Series in Operations Research \\& Management Science},\n\ttitle = {Patchwork Distributions},\n\turl_paper = {pubs/patchwork.pdf},\n\tvolume = {133},\n\tyear = {2009}}\n\n
\n
\n\n\n
\n Patchwork distributions are a class of distributions for use in simulation that can be used to model finite-dimensional random vectors with given marginal distributions and dependence properties. They are an extension of the previously developed chessboard distributions. We show how patchwork distributions can be selected to match several user-specified properties of the joint distribution. In constructing a patchwork distribution, one must solve a linear program that is potentially large. We develop results that shed light on the size of the linear program that one must solve. These results suggest that patchwork distributions should only be used to model random vectors with low dimension, say less than or equal to 5.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Corrigendum: Behavior of the NORTA Method for Correlated Random Vector Generation As the Dimension Increases.\n \n \n \n \n\n\n \n Ghosh, S.; and Henderson, S. G.\n\n\n \n\n\n\n ACM Trans. Model. Comput. Simul., 19(4): 20:1–20:3. November 2009.\n \n\n\n\n
\n\n\n\n \n \n \"Corrigendum:Paper\n  \n \n \n \"Corrigendum: paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n \n \n \n \n \n \n \n \n\n\n\n
\n
@article{Ghosh:2009:CBN:1596519.1596525,\n\tacmid = {1596525},\n\taddress = {New York, NY, USA},\n\tannote = {pubs/corrig2.pdf},\n\tarticleno = {20},\n\tauthor = {Ghosh, Soumyadip and Henderson, Shane G.},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2018-07-16 17:37:03 +0000},\n\tdoi = {10.1145/1596519.1596525},\n\tissn = {1049-3301},\n\tissue_date = {October 2009},\n\tjournal = {ACM Trans. Model. Comput. Simul.},\n\tkeywords = {NORTA method, onion method, sampling random matrices, semidefinite programming},\n\tmonth = nov,\n\tnumber = {4},\n\tnumpages = {3},\n\tpages = {20:1--20:3},\n\tpublisher = {ACM},\n\ttitle = {Corrigendum: Behavior of the NORTA Method for Correlated Random Vector Generation As the Dimension Increases},\n\turl = {http://doi.acm.org/10.1145/1596519.1596525},\n\turl_paper = {https://dl.acm.org/authorize?N654077},\n\tvolume = {19},\n\tyear = {2009},\n\tbdsk-url-1 = {http://doi.acm.org/10.1145/1596519.1596525},\n\tbdsk-url-2 = {http://dx.doi.org/10.1145/1596519.1596525}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficent fair algorithms for message communication.\n \n \n \n \n\n\n \n Gorinsky, S.; Friedman, E. J.; Henderson, S. G.; and Jechlitschek, C.\n\n\n \n\n\n\n Simulation Modelling: Practice and Theory, 17(3): 513–527. 2009.\n \n\n\n\n
\n\n\n\n \n \n \"Efficent paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{goretal09,\n\tabstract = {A computer network serves distributed applications by communicating messages between their remote ends. Many such applications desire minimal delay for their messages. Beside this efficiency objective, allocation of the network capacity is also subject to the fairness constraint of not shutting off communication for any individual message. Processor Sharing (PS) is a de facto standard of fairness but provides significantly higher average delay than Shortest Remaining Processing Time (SRPT), which is an optimally efficient but unfair algo- rithm. In this paper, we explore efficient fair algorithms for message communication where fairness means that no message is delivered later than under PS. First, we introduce a slack system to characterize fair algorithms completely and develop efficient fair algorithms called Pessimistic Fair Sojourn Protocol (PFSP), Optimistic Fair Sojourn Protocol (OFSP), and Shortest Fair Sojourn (SFS). Then, we prove that a fair online algorithm does not assure minimal average delay attainable with fairness. Our analysis also reveals lower bounds on worst-case inefficiency of fair algorithms. We conduct extensive simulations for various distributions of message sizes and arrival times. During either temporary overload or steady-state operation, SFS and other newly proposed fair algorithms support SRPT-like efficiency and consistently provide much smaller average delay than PS.\n},\n\tauthor = {S. Gorinsky and E. J. Friedman and S. G. Henderson and C. Jechlitschek},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Simulation Modelling: Practice and Theory},\n\tnumber = {3},\n\tpages = {513--527},\n\ttitle = {Efficent fair algorithms for message communication},\n\turl_paper = {pubs/EfFair.pdf},\n\tvolume = {17},\n\tyear = {2009}}\n\n
\n
\n\n\n
\n A computer network serves distributed applications by communicating messages between their remote ends. Many such applications desire minimal delay for their messages. Beside this efficiency objective, allocation of the network capacity is also subject to the fairness constraint of not shutting off communication for any individual message. Processor Sharing (PS) is a de facto standard of fairness but provides significantly higher average delay than Shortest Remaining Processing Time (SRPT), which is an optimally efficient but unfair algo- rithm. In this paper, we explore efficient fair algorithms for message communication where fairness means that no message is delivered later than under PS. First, we introduce a slack system to characterize fair algorithms completely and develop efficient fair algorithms called Pessimistic Fair Sojourn Protocol (PFSP), Optimistic Fair Sojourn Protocol (OFSP), and Shortest Fair Sojourn (SFS). Then, we prove that a fair online algorithm does not assure minimal average delay attainable with fairness. Our analysis also reveals lower bounds on worst-case inefficiency of fair algorithms. We conduct extensive simulations for various distributions of message sizes and arrival times. During either temporary overload or steady-state operation, SFS and other newly proposed fair algorithms support SRPT-like efficiency and consistently provide much smaller average delay than PS. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Operations Research Tools for Addressing Current Challenges in Emergency Medical Services.\n \n \n \n \n\n\n \n Henderson, S. G.\n\n\n \n\n\n\n In Cochran, J. J., editor(s), Wiley Encyclopedia of Operations Research and Management Science. Wiley, New York, 2009.\n \n\n\n\n
\n\n\n\n \n \n \"Operations paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{hen09,\n\tabstract = {Emergency Medical Service (EMS) providers face a host of significant challenges, including traffic congestion, increasing call volumes, hospital diversion (i.e., emergency departments at hospitals not allowing ambulances to deliver patients to them), and increasing transfer times at emergency departments. Many of these challenges can be attacked through an application of operations research techniques in conjunction with EMS expertise. The purpose of this article is to outline some of the key challenges and potential operations research based remedies, and to discuss in detail one such remedy that has various names including ``system-status management'' and ``move up.''},\n\taddress = {New York},\n\tauthor = {S. G. Henderson},\n\tbooktitle = {Wiley Encyclopedia of Operations Research and Management Science},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {J. J. Cochran},\n\tpublisher = {Wiley},\n\ttitle = {Operations Research Tools for Addressing Current Challenges in Emergency Medical Services},\n\turl_paper = {pubs/ORTools.pdf},\n\tyear = 2009}\n\n
\n
\n\n\n
\n Emergency Medical Service (EMS) providers face a host of significant challenges, including traffic congestion, increasing call volumes, hospital diversion (i.e., emergency departments at hospitals not allowing ambulances to deliver patients to them), and increasing transfer times at emergency departments. Many of these challenges can be attacked through an application of operations research techniques in conjunction with EMS expertise. The purpose of this article is to outline some of the key challenges and potential operations research based remedies, and to discuss in detail one such remedy that has various names including ``system-status management'' and ``move up.''\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ambulance redeployment: An approximate dynamic programming approach.\n \n \n \n \n\n\n \n Maxwell, M. S.; Henderson, S. G.; and Topaloglu, H.\n\n\n \n\n\n\n In Rossetti, M. D.; Hill, R. R.; Johansson, B.; Dunkin, A.; and Ingalls, R. G., editor(s), Proceedings of the 2009 Winter Simulation Conference, pages 1850–1860, Piscataway NJ, 2009. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Ambulance paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{maxhentop09,\n\tabstract = {Emergency medical service (EMS) providers are charged with the task of managing ambulances so that the time required to respond to emergency calls is minimized. One approach that may assist in reducing response times is ambulance redeployment, i.e., repositioning idle ambulances in real time. We formulate a simulation model of EMS operations to evaluate the performance of a given allocation policy and use this model in an approximate dynamic programming (ADP) context to compute high-quality redeployment policies. We find that the resulting ADP policies perform much better than sub-optimal static policies and marginally better than near-optimal static policies. Representative computational results for Edmonton, Alberta are included.},\n\taddress = {Piscataway {NJ}},\n\tauthor = {M. S. Maxwell and S. G. Henderson and H. Topaloglu},\n\tbooktitle = {Proceedings of the 2009 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {M. D. Rossetti and R. R. Hill and B. Johansson and A. Dunkin and R. G. Ingalls},\n\tpages = {1850--1860},\n\tpublisher = {IEEE},\n\ttitle = {Ambulance redeployment: An approximate dynamic programming approach},\n\turl_paper = {pubs/wsc09redeploy.pdf},\n\tyear = {2009}}\n\n
\n
\n\n\n
\n Emergency medical service (EMS) providers are charged with the task of managing ambulances so that the time required to respond to emergency calls is minimized. One approach that may assist in reducing response times is ambulance redeployment, i.e., repositioning idle ambulances in real time. We formulate a simulation model of EMS operations to evaluate the performance of a given allocation policy and use this model in an approximate dynamic programming (ADP) context to compute high-quality redeployment policies. We find that the resulting ADP policies perform much better than sub-optimal static policies and marginally better than near-optimal static policies. Representative computational results for Edmonton, Alberta are included.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Erlang loss models for the static deployment of ambulances.\n \n \n \n \n\n\n \n Restrepo, M.; Henderson, S. G.; and Topaloglu, H.\n\n\n \n\n\n\n Health Care Management Science, 12(1): 67–79. 2009.\n \n\n\n\n
\n\n\n\n \n \n \"ErlangPaper\n  \n \n \n \"Erlang paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{reshentop09,\n\tabstract = {How should one allocate a fleet of ambulances to fixed bases with the\ngoal of keeping response times to calls as small as possible? We\npresent two new models for this problem, both of which are based on\nthe Erlang loss formula.  The first model is stylized, and shows that\nallocating ambulances in proportion to the offered load is not\nnecessarily optimal and can often be substantially improved upon. The\nsecond model can be used to screen potential allocations to try to\nidentify top candidates for further investigation. Computational experiments on the first model provide insights on how we should modify ambulance allocations in response to different levels of offered load. Computational experiments on the second model compare this model with the so-called A-hypercube model and show that the our model has comparable, and in many cases, better performance in terms of the accuracy of the estimates of  performance measures. Thus, our models can be used as pre-screening tools to identify promising ambulance allocations and these promising ambulance allocations can subsequently be evaluated carefully through simulation.},\n\tauthor = {M. Restrepo and S. G. Henderson and H. Topaloglu},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Health Care Management Science},\n\tnumber = {1},\n\tpages = {67--79},\n\ttitle = {Erlang loss models for the static deployment of ambulances},\n\turl = {http://link.springer.com/article/10.1007%2Fs10729-008-9077-4},\n\turl_paper = {pubs/ErlangLossForStatic.pdf},\n\tvolume = {12},\n\tyear = {2009},\n\tbdsk-url-1 = {http://link.springer.com/article/10.1007%2Fs10729-008-9077-4}}\n\n
\n
\n\n\n
\n How should one allocate a fleet of ambulances to fixed bases with the goal of keeping response times to calls as small as possible? We present two new models for this problem, both of which are based on the Erlang loss formula. The first model is stylized, and shows that allocating ambulances in proportion to the offered load is not necessarily optimal and can often be substantially improved upon. The second model can be used to screen potential allocations to try to identify top candidates for further investigation. Computational experiments on the first model provide insights on how we should modify ambulance allocations in response to different levels of offered load. Computational experiments on the second model compare this model with the so-called A-hypercube model and show that the our model has comparable, and in many cases, better performance in terms of the accuracy of the estimates of performance measures. Thus, our models can be used as pre-screening tools to identify promising ambulance allocations and these promising ambulance allocations can subsequently be evaluated carefully through simulation.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Forecast errors in service systems.\n \n \n \n \n\n\n \n Steckley, S. G.; Henderson, S. G.; and Mehrotra, V.\n\n\n \n\n\n\n Probability in the Engineering and Informational Sciences, 23(2): 305–332. 2009.\n \n\n\n\n
\n\n\n\n \n \n \"Forecast paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{stehenmeh06,\n\tabstract = {We investigate the presence and impact of forecast errors in the arrival rate of customers to a service system. Analysis of a large dataset shows that forecast errors can be large relative to the fluctuations naturally expected in a Poisson process. We show that ignoring forecast errors typically leads to overestimates of performance and that forecast errors of the magnitude seen in our dataset can have a practically significant impact on predictions of long-run performance. We also define short-run performance as the random percentage of calls received in a particular period that are answered in a timely fashion. We prove a central limit theorem that yields a normal-mixture approximation for its distribution for Markovian queues and we sketch an argument that shows that a normal-mixture approximation should be valid in great generality. Our results provide motivation for studying staffing strategies that are more flexible than the fixed-level staffing rules traditionally studied in the literature.},\n\tauthor = {Samuel G. Steckley and Shane G. Henderson and Vijay Mehrotra},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Probability in the Engineering and Informational Sciences},\n\tnumber = {2},\n\tpages = {305--332},\n\ttitle = {Forecast errors in service systems},\n\turl_paper = {pubs/SteHenMeh09.pdf},\n\tvolume = {23},\n\tyear = {2009}}\n\n
\n
\n\n\n
\n We investigate the presence and impact of forecast errors in the arrival rate of customers to a service system. Analysis of a large dataset shows that forecast errors can be large relative to the fluctuations naturally expected in a Poisson process. We show that ignoring forecast errors typically leads to overestimates of performance and that forecast errors of the magnitude seen in our dataset can have a practically significant impact on predictions of long-run performance. We also define short-run performance as the random percentage of calls received in a particular period that are answered in a timely fashion. We prove a central limit theorem that yields a normal-mixture approximation for its distribution for Markovian queues and we sketch an argument that shows that a normal-mixture approximation should be valid in great generality. Our results provide motivation for studying staffing strategies that are more flexible than the fixed-level staffing rules traditionally studied in the literature.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2008\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Staying sane on the tenure track.\n \n \n \n \n\n\n \n Henderson, S. G.\n\n\n \n\n\n\n In Mason, S.; Hill, R.; Mönch, L.; and Rose, O., editor(s), Proceedings of the 2008 Winter Simulation Conference, pages 2954–2959, Piscataway, NJ, 2008. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Staying paper\n  \n \n \n \"Staying slides\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 5 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hen08,\n\tabstract = {A tenure-track appointment is a wonderful thing, but it really should come with an instruction manual. This article is a loosely-coupled collection of thoughts and advice on surviving a tenure-track appointment. The focus is on concrete tips and advice for an engineering-school appointment, which was my path, but perhaps some of the ideas are also valid in other environments, such as business schools and mathematics or computer science departments.},\n\taddress = {Piscataway, NJ},\n\tauthor = {Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2008 Winter Simulation Conference},\n\tdate-added = {2016-01-10 18:00:08 +0000},\n\tdate-modified = {2016-01-10 18:00:08 +0000},\n\teditor = {S. Mason and R. Hill and L. M{\\"{o}}nch and O. Rose},\n\tpages = {2954--2959},\n\tpublisher = {IEEE},\n\ttitle = {Staying sane on the tenure track},\n\turl_paper = {pubs/StayingSane.pdf},\n\turl_slides = {pubs/2008PhDColloq.ppt},\n\tyear = {2008}}\n\n
\n
\n\n\n
\n A tenure-track appointment is a wonderful thing, but it really should come with an instruction manual. This article is a loosely-coupled collection of thoughts and advice on surviving a tenure-track appointment. The focus is on concrete tips and advice for an engineering-school appointment, which was my path, but perhaps some of the ideas are also valid in other environments, such as business schools and mathematics or computer science departments.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Comparing two systems: beyond common random numbers.\n \n \n \n \n\n\n \n M. T. Ehrlichman, S.; and Henderson, S. G.\n\n\n \n\n\n\n In Mason, S.; Hill, R.; Mönch, L.; and Rose, O., editor(s), Proceedings of the 2008 Winter Simulation Conference, pages 245–251, Piscataway, NJ, 2008. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Comparing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ehrhen08,\n\tabstract = {Suppose one wishes to compare two closely related systems via stochastic simulation. Common random numbers (CRN) involves using the same streams of uniform random variates as inputs for both systems to sharpen the comparison. One can view CRN as a particular choice of copula that gives the joint distribution of the inputs of both systems. We discuss the possibility of using more general copulae, including simple examples that show how this can outperform CRN.},\n\taddress = {Piscataway, NJ},\n\tauthor = {Samuel M.~T.~Ehrlichman and Shane G.\\ Henderson},\n\tbooktitle = {Proceedings of the 2008 Winter Simulation Conference},\n\tdate-added = {2016-01-10 17:59:07 +0000},\n\tdate-modified = {2016-01-10 17:59:07 +0000},\n\teditor = {S. Mason and R. Hill and L. M{\\"{o}}nch and O. Rose},\n\tpages = {245--251},\n\tpublisher = {IEEE},\n\ttitle = {Comparing two systems: beyond common random numbers},\n\turl_paper = {pubs/TwoSystems.pdf},\n\tyear = {2008}}\n\n
\n
\n\n\n
\n Suppose one wishes to compare two closely related systems via stochastic simulation. Common random numbers (CRN) involves using the same streams of uniform random variates as inputs for both systems to sharpen the comparison. One can view CRN as a particular choice of copula that gives the joint distribution of the inputs of both systems. We discuss the possibility of using more general copulae, including simple examples that show how this can outperform CRN.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimizing call center staffing using simulation and analytic center cutting plane methods.\n \n \n \n \n\n\n \n Atlason, J.; Epelman, M.; and Henderson, S.\n\n\n \n\n\n\n Management Science, 54: 295–309. 2008.\n \n\n\n\n
\n\n\n\n \n \n \"Optimizing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{atlepehen07,\n\tabstract = {We consider the problem of minimizing staffing costs in an inbound call center, while maintaining an acceptable level of service in multiple time periods. The problem is complicated by the fact that staffing level in one time period can affect the service levels in subsequent periods. Moreover, staff schedules typically take the form of shifts covering several periods. Interactions between staffing levels in different time periods, as well as the impact of shift requirements on the staffing levels and cost should be considered in the planning. Traditional staffing methods based on stationary queueing formulas do not take this into account. We present a simulation-based analytic center cutting plane method to solve a sample average approximation of the problem. We establish convergence of the method when the service level functions are discrete pseudoconcave. An extensive numerical study of a moderately large call center shows that the method is robust and, in most of the test cases, outperforms traditional staffing heuristics that are based on analytical queueing methods.},\n\tauthor = {J.\\ Atlason and M.~A.\\ Epelman and S.~G.\\ Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Management Science},\n\tpages = {295--309},\n\ttitle = {Optimizing call center staffing using simulation and analytic center cutting plane methods},\n\turl_paper = {pubs/aeh2005.pdf},\n\tvolume = {54},\n\tyear = {2008}}\n\n
\n
\n\n\n
\n We consider the problem of minimizing staffing costs in an inbound call center, while maintaining an acceptable level of service in multiple time periods. The problem is complicated by the fact that staffing level in one time period can affect the service levels in subsequent periods. Moreover, staff schedules typically take the form of shifts covering several periods. Interactions between staffing levels in different time periods, as well as the impact of shift requirements on the staffing levels and cost should be considered in the planning. Traditional staffing methods based on stationary queueing formulas do not take this into account. We present a simulation-based analytic center cutting plane method to solve a sample average approximation of the problem. We establish convergence of the method when the service level functions are discrete pseudoconcave. An extensive numerical study of a moderately large call center shows that the method is robust and, in most of the test cases, outperforms traditional staffing heuristics that are based on analytical queueing methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The mathematics of continuous-variable simulation optimization.\n \n \n \n \n\n\n \n Kim, S.; and Henderson, S.\n\n\n \n\n\n\n In Mason, S.; Hill, R.; Moench, L.; and Rose, O., editor(s), Proceedings of the 2008 Winter Simulation Conference, pages 122–132, Piscataway NJ, 2008. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{kimhen08,\n\tabstract = {Continuous-variable simulation optimization problems are those optimization problems where the objective function is computed through stochastic simulation and the decision variables are continuous. We discuss verifiable conditions under which the objective function is continuous or differentiable, and outline some key properties of two classes of methods for solving such problems, namely sample-average approximation and stochastic approximation.},\n\taddress = {Piscataway NJ},\n\tauthor = {Sujin Kim and S.~G.\\ Henderson},\n\tbooktitle = {Proceedings of the 2008 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {S.~J.\\ Mason and R.~R.\\ Hill and L.\\ Moench and O.\\ Rose},\n\torganization = {IEEE},\n\tpages = {122--132},\n\ttitle = {The mathematics of continuous-variable simulation optimization},\n\turl_paper = {pubs/MathSimOpt.pdf},\n\tyear = {2008}}\n\n
\n
\n\n\n
\n Continuous-variable simulation optimization problems are those optimization problems where the objective function is computed through stochastic simulation and the decision variables are continuous. We discuss verifiable conditions under which the objective function is continuous or differentiable, and outline some key properties of two classes of methods for solving such problems, namely sample-average approximation and stochastic approximation.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2007\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Deterministic and stochastic root finding in one dimension for increasing convex functions.\n \n \n \n\n\n \n Ehrlichman, S. M. T.; and Henderson, S. G.\n\n\n \n\n\n\n 2007.\n Unpublished manuscript\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{ehrhen07a,\n\tauthor = {S. M. T. Ehrlichman and S. G. Henderson},\n\tdate-added = {2016-01-10 19:32:45 +0000},\n\tdate-modified = {2016-01-10 19:32:45 +0000},\n\tnote = {Unpublished manuscript},\n\ttitle = {Deterministic and stochastic root finding in one dimension for increasing convex functions},\n\tyear = {2007}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Finite-sample performance guarantees for one-dimensional stochastic root finding.\n \n \n \n \n\n\n \n Ehrlichman, S. M. T.; and Henderson, S. G.\n\n\n \n\n\n\n In Henderson, S. G.; Biller, B.; Hsieh, M.; Shortle, J.; Tew, J. D.; and Barton, R. R., editor(s), Proceedings of the 2007 Winter Simulation Conference, pages 313–321, Piscataway, New Jersey, 2007. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Finite-sample paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ehrhen07b,\n\tabstract = {We study the one-dimensional root finding problem for increasing convex functions. We give gradient-free algorithms for both exact and inexact (stochastic) function evaluations. For the stochastic case, we supply a probabilistic convergence guarantee in the spirit of selection-of-the-best methods. A worst-case bound on the work performed by the algorithm shows an improvement over naive stochastic bisection.},\n\taddress = {Piscataway, New Jersey},\n\tauthor = {S. M. T. Ehrlichman and S. G. Henderson},\n\tbooktitle = {Proceedings of the 2007 Winter Simulation Conference},\n\tdate-added = {2016-01-10 17:56:50 +0000},\n\tdate-modified = {2016-01-10 17:56:50 +0000},\n\teditor = {S. G. Henderson and B. Biller and M.-H. Hsieh and J. Shortle and J. D. Tew and R. R. Barton},\n\tpages = {313--321},\n\tpublisher = {IEEE},\n\ttitle = {Finite-sample performance guarantees for one-dimensional stochastic root finding},\n\turl_paper = {pubs/ehrlichmans16702i.pdf},\n\tyear = {2007}}\n\n
\n
\n\n\n
\n We study the one-dimensional root finding problem for increasing convex functions. We give gradient-free algorithms for both exact and inexact (stochastic) function evaluations. For the stochastic case, we supply a probabilistic convergence guarantee in the spirit of selection-of-the-best methods. A worst-case bound on the work performed by the algorithm shows an improvement over naive stochastic bisection.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Proceedings of the 2007 Winter Simulation Conference.\n \n \n \n\n\n \n Henderson, S. G.; Biller, B.; Hsieh, M.; Shortle, J.; Tew, J.; and Barton, R. R.,\n editors.\n \n\n\n \n\n\n\n IEEE, Piscataway NJ, 2007.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{wsc07,\n\taddress = {Piscataway {NJ}},\n\tdate-added = {2016-01-10 16:50:38 +0000},\n\tdate-modified = {2016-01-10 16:52:14 +0000},\n\teditor = {S. G. Henderson and B. Biller and M.-H. Hsieh and J. Shortle and J. Tew and R. R. Barton},\n\tpublisher = {IEEE},\n\ttitle = {Proceedings of the 2007 Winter Simulation Conference},\n\tyear = {2007}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Non-linear control variates for regenerative steady-state simulation.\n \n \n \n \n\n\n \n Kim, S.; and Henderson, S. G.\n\n\n \n\n\n\n In Henderson, S. G.; Biller, B.; Hsieh, M.; Shortle, J.; Tew, J. D.; and Barton, R. R., editor(s), Proceedings of the 2007 Winter Simulation Conference, pages 430–438, Piscataway, New Jersey, 2007. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Non-linear paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{kimhen07b,\n\tabstract = {We assume the existence of a parameterized family of control variates that could be used in a regenerative steady-state simulation. We show how such controls can be generated in the Markov-process setting, discuss the optimization problem of searching for a good choice of parameterization, and develop a strong law and central limit theorem for the resulting estimator.},\n\taddress = {Piscataway, New Jersey},\n\tauthor = {Sujin Kim and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2007 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 17:58:10 +0000},\n\teditor = {S. G. Henderson and B. Biller and M.-H. Hsieh and J. Shortle and J. D. Tew and R. R. Barton},\n\tpages = {430--438},\n\tpublisher = {IEEE},\n\ttitle = {Non-linear control variates for regenerative steady-state simulation},\n\turl_paper = {pubs/hendersons66121i.pdf},\n\tyear = {2007}}\n\n
\n
\n\n\n
\n We assume the existence of a parameterized family of control variates that could be used in a regenerative steady-state simulation. We show how such controls can be generated in the Markov-process setting, discuss the optimization problem of searching for a good choice of parameterization, and develop a strong law and central limit theorem for the resulting estimator.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive control variates for pricing multi-dimensional American options.\n \n \n \n \n\n\n \n M. T. Ehrlichman, S.; and G. Henderson, S.\n\n\n \n\n\n\n Journal of Computational Finance, 11(1): 65–91. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"Adaptive paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{EhrlHend07jcf,\n\tabstract = {We explore a class of control variates for the American option pricing problem. We construct the control variates by using multivariate adaptive linear regression splines to approximate the option's value function at each time step; the resulting approximate value functions are then combined to construct a martingale that approximates a ``perfect'' control variate. We demonstrate that significant variance reduction is possible even in a high-dimensional setting. Moreover, the technique is applicable to a wide range of both option payoff structures and assumptions about the underlying risk-neutral market dynamics. The only restriction is that one must be able to compute certain one-step conditional expectations of the individual underlying random variables.},\n\tauthor = {Samuel M.~T.~Ehrlichman and Shane G.~Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Journal of Computational Finance},\n\tnumber = {1},\n\tpages = {65--91},\n\ttitle = {Adaptive control variates for pricing multi-dimensional {A}merican options},\n\turl_paper = {pubs/AdaptiveAmerican.pdf},\n\tvolume = {11},\n\tyear = {2007}}\n\n
\n
\n\n\n
\n We explore a class of control variates for the American option pricing problem. We construct the control variates by using multivariate adaptive linear regression splines to approximate the option's value function at each time step; the resulting approximate value functions are then combined to construct a martingale that approximates a ``perfect'' control variate. We demonstrate that significant variance reduction is possible even in a high-dimensional setting. Moreover, the technique is applicable to a wide range of both option payoff structures and assumptions about the underlying risk-neutral market dynamics. The only restriction is that one must be able to compute certain one-step conditional expectations of the individual underlying random variables.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive control variates for finite-horizon simulation.\n \n \n \n \n\n\n \n Kim, S.; and Henderson, S. G.\n\n\n \n\n\n\n Mathematics of Operations Research, 32: 508–527. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"Adaptive paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{kimhen06,\n\tabstract = {Adaptive Monte Carlo methods are simulation efficiency improvement techniques designed to adap- tively tune simulation estimators. Most of the work on adaptive Monte Carlo methods has been devoted to adaptively tuning importance sampling schemes. We instead focus on adaptive methods based on con- trol variate schemes. We introduce two adaptive control variate methods, and develop their asymptotic properties. The first method uses stochastic approximation to adaptively tune control variate estimators. It is easy to implement, but it requires some non-trivial tuning of parameters. The second method is based on sample average approximation. Tuning is no longer required, but it can be computationally expensive. Numerical results for the pricing of barrier options are presented to demonstrate the methods.},\n\tauthor = {Sujin Kim and Shane G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Mathematics of Operations Research},\n\tpages = {508--527},\n\ttitle = {Adaptive control variates for finite-horizon simulation},\n\turl_paper = {pubs/AdaptiveControlsFinite.pdf},\n\tvolume = {32},\n\tyear = {2007}}\n\n
\n
\n\n\n
\n Adaptive Monte Carlo methods are simulation efficiency improvement techniques designed to adap- tively tune simulation estimators. Most of the work on adaptive Monte Carlo methods has been devoted to adaptively tuning importance sampling schemes. We instead focus on adaptive methods based on con- trol variate schemes. We introduce two adaptive control variate methods, and develop their asymptotic properties. The first method uses stochastic approximation to adaptively tune control variate estimators. It is easy to implement, but it requires some non-trivial tuning of parameters. The second method is based on sample average approximation. Tuning is no longer required, but it can be computationally expensive. Numerical results for the pricing of barrier options are presented to demonstrate the methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n The error in steady-state approximations for the time-dependent waiting time distribution.\n \n \n \n \n\n\n \n Steckley, S. G.; and Henderson, S. G.\n\n\n \n\n\n\n Stochastic Models, 23: 307–332. 2007.\n \n\n\n\n
\n\n\n\n \n \n \"The paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{stehen07,\n\tabstract = {Arrival processes to queues often exhibit time dependence, but time-dependent queues are notoriously difficult to analyze. Usually one adopts some kind of steady- state approximation for time-dependent performance. We consider a range of queueing models that see wide use in practice, particularly in the modeling of service industry applications, and develop approximations for the error involved in using a steady-state approximation for time-dependent quantities. We focus on the distribution of the waiting time before reaching service, although the approach we use could be extended to many other performance measures. The results reinforce and extend what is known about such errors. For example, customer abandonment leads to dramatic reductions in the error in steady-state approximations as compared to a system without customer abandonment.},\n\tauthor = {Samuel G. Steckley and Shane G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 17:05:46 +0000},\n\tdoi = {10.1080/15326340701300886},\n\tjournal = {Stochastic Models},\n\tpages = {307--332},\n\ttitle = {The error in steady-state approximations for the time-dependent waiting time distribution},\n\turl_paper = {pubs/ErrorInSteadyState.pdf},\n\tvolume = {23},\n\tyear = {2007},\n\tbdsk-url-1 = {http://dx.doi.org/10.1080/15326340701300886}}\n\n
\n
\n\n\n
\n Arrival processes to queues often exhibit time dependence, but time-dependent queues are notoriously difficult to analyze. Usually one adopts some kind of steady- state approximation for time-dependent performance. We consider a range of queueing models that see wide use in practice, particularly in the modeling of service industry applications, and develop approximations for the error involved in using a steady-state approximation for time-dependent quantities. We focus on the distribution of the waiting time before reaching service, although the approach we use could be extended to many other performance measures. The results reinforce and extend what is known about such errors. For example, customer abandonment leads to dramatic reductions in the error in steady-state approximations as compared to a system without customer abandonment.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2006\n \n \n (6)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n American options from MARS.\n \n \n \n \n\n\n \n Ehrlichman, S. M. T.; and Henderson, S. G.\n\n\n \n\n\n\n In Perrone, L. F.; Wieland, F. P.; Liu, J.; Lawson, B. G.; Nicol, D. M.; and Fujimoto, R. M., editor(s), Proceedings of the 2006 Winter Simulation Conference, pages 719–726, Piscataway, New Jersey, 2006. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"American paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ehrhen06b,\n\tabstract = {We develop a class of control variates for the American option pricing problem that are constructed through the use of MARS - multivariate adaptive regression splines. The splines approximate the option's value function at each time step, and the value function approximations are then used to construct a martingale that serves as the control variate. Significant variance reduction is possible even in high dimensions. The primary restriction is that we must be able to compute certain one-step conditional expectations.},\n\taddress = {Piscataway, New Jersey},\n\tauthor = {S. M. T. Ehrlichman and S. G. Henderson},\n\tbooktitle = {Proceedings of the 2006 Winter Simulation Conference},\n\tdate-added = {2023-05-05 13:44:25 -0400},\n\tdate-modified = {2023-05-05 13:44:25 -0400},\n\teditor = {L. F. Perrone and F. P. Wieland and J. Liu and B. G. Lawson and D. M. Nicol and R. M. Fujimoto},\n\tpages = {719--726},\n\tpublisher = {IEEE},\n\ttitle = {American options from {MARS}},\n\turl_paper = {pubs/EhrHen06b.pdf},\n\tyear = {2006}}\n\n
\n
\n\n\n
\n We develop a class of control variates for the American option pricing problem that are constructed through the use of MARS - multivariate adaptive regression splines. The splines approximate the option's value function at each time step, and the value function approximations are then used to construct a martingale that serves as the control variate. Significant variance reduction is possible even in high dimensions. The primary restriction is that we must be able to compute certain one-step conditional expectations.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Corrigendum: Behaviour of the NORTA method for correlated random vector generation as the dimension increases.\n \n \n \n \n\n\n \n Ghosh, S.; and Henderson, S. G.\n\n\n \n\n\n\n ACM Transactions on Modeling and Computer Simulation, 16: 93–94. 2006.\n \n\n\n\n
\n\n\n\n \n \n \"Corrigendum: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{ghohen06,\n\tannote = {pubs/corrig1.pdf},\n\tauthor = {Soumyadip Ghosh and Shane G. Henderson},\n\tdate-added = {2016-01-10 17:01:58 +0000},\n\tdate-modified = {2018-07-16 17:37:37 +0000},\n\tjournal = {ACM Transactions on Modeling and Computer Simulation},\n\tpages = {93--94},\n\ttitle = {Corrigendum: Behaviour of the NORTA method for correlated random vector generation as the dimension increases},\n\turl_paper = {https://dl.acm.org/authorize?N654078},\n\tvolume = {16},\n\tyear = {2006}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Stochastic Simulation.\n \n \n \n \n\n\n \n Henderson, S. G.; and Nelson, B. L.\n\n\n \n\n\n\n In Henderson, S. G.; and Nelson, B. L., editor(s), Simulation, of Handbooks in Operations Research and Management Science, Chapter 1. Elsevier, 2006.\n \n\n\n\n
\n\n\n\n \n \n \"Stochastic paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{hennel06b,\n\tabstract = {We introduce the topic of this book, explain what we mean by stochastic computer simulation and provide examples of application areas. We motivate the remaining chapters in the book through two in-depth examples. These examples also help clarify several concepts and techniques that are pervasive in simulation theory and practice.},\n\tauthor = {Shane G. Henderson and Barry L. Nelson},\n\tbooktitle = {Simulation},\n\tchapter = {Chapter 1},\n\tdate-added = {2016-01-10 16:47:14 +0000},\n\tdate-modified = {2016-01-10 16:49:55 +0000},\n\teditor = {Shane G. Henderson and Barry L. Nelson},\n\tpublisher = {Elsevier},\n\tseries = {Handbooks in Operations Research and Management Science},\n\ttitle = {Stochastic Simulation},\n\turl_paper = {pubs/StochCompSim.pdf},\n\tyear = {2006}}\n\n
\n
\n\n\n
\n We introduce the topic of this book, explain what we mean by stochastic computer simulation and provide examples of application areas. We motivate the remaining chapters in the book through two in-depth examples. These examples also help clarify several concepts and techniques that are pervasive in simulation theory and practice.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Mathematics for Simulation.\n \n \n \n \n\n\n \n G. Henderson, S.\n\n\n \n\n\n\n In Henderson, S. G.; and Nelson, B. L., editor(s), Simulation, volume 13, of Handbooks in Operations Research and Management Science. Elsevier, Amsterdam, 2006.\n \n\n\n\n
\n\n\n\n \n \n \"Mathematics paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{Hend06,\n\tabstract = {This chapter surveys certain mathematical results and techniques that are pervasive in the analysis of stochastic simulation. The concepts are introduced through the study of a simple model of ambulance operation to ensure clarity, concreteness and cohesion.},\n\taddress = {Amsterdam},\n\tauthor = {Shane G.~Henderson},\n\tbooktitle = {Simulation},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {Shane G. Henderson and Barry L. Nelson},\n\tpublisher = {Elsevier},\n\tseries = {Handbooks in Operations Research and Management Science},\n\ttitle = {Mathematics for Simulation},\n\turl_paper = {pubs/SimBkMath4Sim.pdf},\n\tvolume = {13},\n\tyear = {2006}}\n\n
\n
\n\n\n
\n This chapter surveys certain mathematical results and techniques that are pervasive in the analysis of stochastic simulation. The concepts are introduced through the study of a simple model of ambulance operation to ensure clarity, concreteness and cohesion.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n Simulation.\n \n \n \n\n\n \n Henderson, S. G.; and Nelson, B. L.,\n editors.\n \n\n\n \n\n\n\n of Handbooks in Operations Research and Management ScienceElsevier, Amsterdam, 2006.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@book{hennel06,\n\taddress = {Amsterdam},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {S. G. Henderson and B. L. Nelson},\n\tpublisher = {Elsevier},\n\tseries = {Handbooks in Operations Research and Management Science},\n\ttitle = {Simulation},\n\tyear = {2006}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A testbed of simulation-optimization problems.\n \n \n \n \n\n\n \n Pasupathy, R.; and Henderson, S. G.\n\n\n \n\n\n\n In Perrone, L. F.; Wieland, F. P.; Liu, J.; Lawson, B. G.; Nicol, D. M.; and Fujimoto, R. M., editor(s), Proceedings of the 2006 Winter Simulation Conference, pages 255–263, Piscataway NJ, 2006. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 12 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{pashen06,\n\tabstract = {We propose a testbed of simulation-optimization problems. The purpose of the testbed is to encourage development and constructive comparison of simulation-optimization techniques and algorithms. We are particularly interested in increasing attention to the finite-time performance of algorithms, rather than the asymptotic results that one often finds in the literature.},\n\taddress = {Piscataway NJ},\n\tauthor = {R. Pasupathy and S. G. Henderson},\n\tbooktitle = {Proceedings of the 2006 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {L. F. Perrone and F. P. Wieland and J. Liu and B. G. Lawson and D. M. Nicol and R. M. Fujimoto},\n\tpages = {255--263},\n\tpublisher = {IEEE},\n\ttitle = {A testbed of simulation-optimization problems},\n\turl_paper = {pubs/PasHen06.pdf},\n\tyear = {2006}}\n\n
\n
\n\n\n
\n We propose a testbed of simulation-optimization problems. The purpose of the testbed is to encourage development and constructive comparison of simulation-optimization techniques and algorithms. We are particularly interested in increasing attention to the finite-time performance of algorithms, rather than the asymptotic results that one often finds in the literature.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2005\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Variance reduction for simulation in multiclass processing networks.\n \n \n \n \n\n\n \n Henderson, S. G.; and Meyn, S. P.\n\n\n \n\n\n\n 2005.\n \n\n\n\n
\n\n\n\n \n \n \"Variance paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{henmey05,\n\tabstract = {We use simulation to estimate the steady-state performance of a stable multiclass queueing network. Standard estimators have been seen to perform poorly when the network is heavily loaded. We introduce two new simulation estimators. The first provides substantial variance reductions in moderately-loaded networks at very little additional computational cost. The second estimator provides substantial variance reductions in heavy traffic, again for a small additional computational cost. Both methods employ the variance reduction method of control variates, and differ in terms of how the control variates are constructed.},\n\tauthor = {Shane G. Henderson and Sean P. Meyn},\n\tdate-added = {2016-01-10 19:34:59 +0000},\n\tdate-modified = {2020-06-01 13:23:38 -0400},\n\ttitle = {Variance reduction for simulation in multiclass processing networks},\n\turl_paper = {http://arxiv.org/abs/2005.14179},\n\tyear = {2005}}\n\n
\n
\n\n\n
\n We use simulation to estimate the steady-state performance of a stable multiclass queueing network. Standard estimators have been seen to perform poorly when the network is heavily loaded. We introduce two new simulation estimators. The first provides substantial variance reductions in moderately-loaded networks at very little additional computational cost. The second estimator provides substantial variance reductions in heavy traffic, again for a small additional computational cost. Both methods employ the variance reduction method of control variates, and differ in terms of how the control variates are constructed.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Robust optimization for intensity modulated radiation therapy treatment planning under uncertainty.\n \n \n \n \n\n\n \n Chu, M.; Zinchenko, Y.; Henderson, S. G.; and Sharpe, M. B.\n\n\n \n\n\n\n Physics in Medicine and Biology, 50: 5463–5477. 2005.\n \n\n\n\n
\n\n\n\n \n \n \"Robust paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{chuetal05,\n\tabstract = {The recent development of Intensity Modulated Radiation Therapy (IMRT) allows the dose distribution to be tailored to match the tumour's shape and position, avoiding damage to healthy tissue to a greater extent than previously possible. Traditional treatment plans assume the target structure remains in a fixed location throughout treatment. However, many studies have shown that because of organ motion, inconsistencies in patient positioning over the weeks of treatment, etc., the tumour location is not stationary. We present a probabilistic model for the IMRT inverse problem and show that it is identical to using robust optimization techniques, under certain assumptions. For a sample prostate case, our computational results show that this method is computationally feasible and promising - compared to traditional methods, our model has the potential to find treatment plans that are more adept at sparing healthy tissue while maintaining the prescribed dose to the target.},\n\tauthor = {Millie Chu and Yuriy Zinchenko and Shane G. Henderson and Michael B. Sharpe},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Physics in Medicine and Biology},\n\tpages = {5463--5477},\n\ttitle = {Robust optimization for intensity modulated radiation therapy treatment planning under uncertainty},\n\turl_paper = {pubs/ChuIMRT.pdf},\n\tvolume = {50},\n\tyear = {2005}}\n\n
\n
\n\n\n
\n The recent development of Intensity Modulated Radiation Therapy (IMRT) allows the dose distribution to be tailored to match the tumour's shape and position, avoiding damage to healthy tissue to a greater extent than previously possible. Traditional treatment plans assume the target structure remains in a fixed location throughout treatment. However, many studies have shown that because of organ motion, inconsistencies in patient positioning over the weeks of treatment, etc., the tumour location is not stationary. We present a probabilistic model for the IMRT inverse problem and show that it is identical to using robust optimization techniques, under certain assumptions. For a sample prostate case, our computational results show that this method is computationally feasible and promising - compared to traditional methods, our model has the potential to find treatment plans that are more adept at sparing healthy tissue while maintaining the prescribed dose to the target.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Should we model dependence and nonstationarity, and if so how?.\n \n \n \n \n\n\n \n Henderson, S.\n\n\n \n\n\n\n In Kuhl, M. E.; Steiger, N. M.; Armstrong, F. B.; and Joines, J. A., editor(s), Proceedings of the 2005 Winter Simulation Conference, pages 120–129, Piscataway NJ, 2005. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Should paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hen05,\n\tabstract = {It can be difficult to develop, fit to data, and generate from, models that capture dependence and/or nonstationarity in simulation inputs. Should we bother? How should we go about it? I will discuss these issues, focusing on three examples: call center arrivals, ambulance travel speeds and wind modeling for America's Cup races.},\n\taddress = {Piscataway NJ},\n\tauthor = {Henderson,S.~G.},\n\tbooktitle = {Proceedings of the 2005 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {M. E. Kuhl and N. M. Steiger and F. B. Armstrong and J. A. Joines},\n\tpages = {120--129},\n\tpublisher = {IEEE},\n\ttitle = {Should we model dependence and nonstationarity, and if so how?},\n\turl_paper = {pubs/hendersons88667i.pdf},\n\tyear = 2005}\n\n
\n
\n\n\n
\n It can be difficult to develop, fit to data, and generate from, models that capture dependence and/or nonstationarity in simulation inputs. Should we bother? How should we go about it? I will discuss these issues, focusing on three examples: call center arrivals, ambulance travel speeds and wind modeling for America's Cup races.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance measures for service systems with a random arrival rate.\n \n \n \n \n\n\n \n Steckley, S. G.; Henderson, S. G.; and Mehrotra, V.\n\n\n \n\n\n\n In Kuhl, M. E.; Steiger, N. M.; Armstrong, F. B.; and Joines, J. A., editor(s), Proceedings of the 2005 Winter Simulation Conference, pages 566–575, Piscataway, NJ, 2005. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Performance paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{stehenmeh05,\n\tabstract = {It is commonly assumed that the arrival process of customers to a service system is a nonhomogeneous Poisson process. Call center data often refute this assumption, and several authors have postulated a doubly-stochastic Poisson process for arrivals instead. We develop approximations for both the long-run fraction of calls answered quickly, and the distribution of the fraction of calls answered quickly within a short period. We also perform a computational study to evaluate the approximations and improve our understanding of such systems.},\n\taddress = {Piscataway, NJ},\n\tauthor = {Samuel G. Steckley and Shane G. Henderson and Vijay Mehrotra},\n\tbooktitle = {Proceedings of the 2005 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {M. E. Kuhl and N. M. Steiger and F. B. Armstrong and J. A. Joines},\n\torganization = {IEEE},\n\tpages = {566--575},\n\ttitle = {Performance measures for service systems with a random arrival rate},\n\turl_paper = {pubs/hendersons86363i.pdf},\n\tyear = {2005}}\n\n
\n
\n\n\n
\n It is commonly assumed that the arrival process of customers to a service system is a nonhomogeneous Poisson process. Call center data often refute this assumption, and several authors have postulated a doubly-stochastic Poisson process for arrivals instead. We develop approximations for both the long-run fraction of calls answered quickly, and the distribution of the fraction of calls answered quickly within a short period. We also perform a computational study to evaluate the approximations and improve our understanding of such systems.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2004\n \n \n (7)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Call center staffing with simulation and cutting plane methods.\n \n \n \n \n\n\n \n Atlason, J.; Epelman, M.; and Henderson, S.\n\n\n \n\n\n\n Annals of Operations Research, 127: 333-358. 2004.\n \n\n\n\n
\n\n\n\n \n \n \"Call paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{atlepehen03,\n\tabstract = {We present an iterative cutting plane method for minimizing staffing costs in a service system subject to satisfying acceptable service level requirements over multiple time periods. We assume that the service level cannot be easily computed, and instead is evaluated using simulation. The simulation uses the method of common random numbers, so that the same sequence of random phenomena is observed when evaluating different staffing plans. In other words, we solve a sample average approximation problem. We establish convergence of the cutting plane method on a given sample average approximation. We also establish both convergence, and the rate of convergence, of the solutions to the sample average approximation to solutions of the original problem as the sample size increases. The cutting plane method relies on the service level functions being concave in the number of servers. We show how to verify this requirement as our algorithm proceeds. A numerical example showcases the properties of our method, and sheds light on when the concavity requirement can be expected to hold.},\n\tauthor = {J.\\ Atlason and M.~A.\\ Epelman and S.~G.\\ Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 19:51:57 +0000},\n\tdoi = {10.1023/B:ANOR.0000019095.91642.bb},\n\tjournal = {Annals of Operations Research},\n\tpages = {333-358},\n\ttitle = {Call center staffing with simulation and cutting plane methods},\n\turl_paper = {pubs/CuttingPlanesAndSim.pdf},\n\tvolume = {127},\n\tyear = {2004},\n\tbdsk-url-1 = {http://dx.doi.org/10.1023/B:ANOR.0000019095.91642.bb}}\n\n
\n
\n\n\n
\n We present an iterative cutting plane method for minimizing staffing costs in a service system subject to satisfying acceptable service level requirements over multiple time periods. We assume that the service level cannot be easily computed, and instead is evaluated using simulation. The simulation uses the method of common random numbers, so that the same sequence of random phenomena is observed when evaluating different staffing plans. In other words, we solve a sample average approximation problem. We establish convergence of the cutting plane method on a given sample average approximation. We also establish both convergence, and the rate of convergence, of the solutions to the sample average approximation to solutions of the original problem as the sample size increases. The cutting plane method relies on the service level functions being concave in the number of servers. We show how to verify this requirement as our algorithm proceeds. A numerical example showcases the properties of our method, and sheds light on when the concavity requirement can be expected to hold.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Optimizing call center staffing using simulation and analytic center cutting plane methods.\n \n \n \n \n\n\n \n Atlason, J.; Epelman, M.; and Henderson, S.\n\n\n \n\n\n\n 2004.\n Tech Report 04-09, IOE Department, University of Michigan, Ann Arbor, MI\n\n\n\n
\n\n\n\n \n \n \"Optimizing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{atlepehen04,\n\tabstract = {We present a simulation-based analytic center cutting plane method to solve a sample average approximation of a call center problem of minimizing staffing costs, while maintaining an acceptable level of service in multiple time periods. We establish convergence of the method when the service level functions are discrete pseudoconcave. An extensive numerical study of a moderately large call center shows that the method is robust, and, in most of the test cases, outperforms traditional staffing heuristics that are based on analytical queuing methods.},\n\tauthor = {J.\\ Atlason and M.~A.\\ Epelman and S.~G.\\ Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tnote = {Tech Report 04-09, IOE Department, University of Michigan, Ann Arbor, MI},\n\ttitle = {Optimizing call center staffing using simulation and analytic center cutting plane methods},\n\turl_paper = {pubs/SACCPM.pdf},\n\tyear = {2004}}\n\n
\n
\n\n\n
\n We present a simulation-based analytic center cutting plane method to solve a sample average approximation of a call center problem of minimizing staffing costs, while maintaining an acceptable level of service in multiple time periods. We establish convergence of the method when the service level functions are discrete pseudoconcave. An extensive numerical study of a moderately large call center shows that the method is robust, and, in most of the test cases, outperforms traditional staffing heuristics that are based on analytical queuing methods.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n A central limit theorem for empirical quantiles in the Markov chain setting.\n \n \n \n\n\n \n Henderson, S.; and Glynn, P.\n\n\n \n\n\n\n 2004.\n Working paper\n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@unpublished{hengly04,\n\tabstract = {We provide a new proof of a central limit theorem (CLT) for empirical quantiles in the positive-recurrent Markov process setting. We also establish the validity of the method of nonoverlapping batch means with a fixed number of batches for interval estimation of the quantile. The conditions of these results are likely to be difficult to verify in practice, and so we also provide ``easily verified'' sufficient conditions.},\n\tauthor = {Henderson, S.~G. and Glynn, P.~W.},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tnote = {Working paper},\n\ttitle = {A central limit theorem for empirical quantiles in the {M}arkov chain setting},\n\tyear = {2004}}\n\n
\n
\n\n\n
\n We provide a new proof of a central limit theorem (CLT) for empirical quantiles in the positive-recurrent Markov process setting. We also establish the validity of the method of nonoverlapping batch means with a fixed number of batches for interval estimation of the quantile. The conditions of these results are likely to be difficult to verify in practice, and so we also provide ``easily verified'' sufficient conditions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Ambulance service planning: simulation and data visualization.\n \n \n \n \n\n\n \n Henderson, S. G.; and Mason, A. J.\n\n\n \n\n\n\n In Brandeau, M.; Sainfort, F.; and Pierskalla, W. P., editor(s), Operations Research and Health Care: A Handbook of Methods and Applications, pages 77–102. Kluwer Academic, Boston, 2004.\n \n\n\n\n
\n\n\n\n \n \n \"Ambulance paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@incollection{henmas04,\n\tabstract = {\nThe ambulance-planning problem includes operational decisions such as choice of dispatching policy, strategic decisions such as where ambulances should be stationed and at what times they should operate, and tactical decisions such as station location selection. Any solution to this problem requires careful balancing of political, economic and medical objectives. Quantitative decision processes are becoming increasingly important in providing public accountability for the resource decisions that have to be made. We discuss a simulation and analysis software tool `BARTSIM' that was developed as a decision support tool for use within the St. John Ambulance Service (Auckland Region) in New Zealand (St. Johns). The novel features incorporated within this study include\n- the use of a detailed time-varying travel model for modelling travel times in the simulation,\n- methods for reducing the computational overhead associated with computing time-dependent shortest paths in the travel model,\n- the direct reuse of real data as recorded in a database (trace-driven simulation), and\n- the development of a geographic information sub-system (GIS) within BARTSIM that provides spatial visualisation of both historical data and the results of what-if simulations.\nOur experience with St. Johns, and discussions with emergency operators in Australia, North America, and Europe, suggest that emergency services do not have good tools to support their operations management at all levels (operational, strategic and tactical). Our experience has shown that a customized system such as BARTSIM can successfully combine GIS and simulation approaches to provide a quantitative decision support tool highly valued by management. Further evidence of the value of our system is provided by the recent selection of BARTSIM by the Metropolitan Ambulance Service for simulation of their operations in Melbourne, Australia. 
This work has led to the development of BARTSIM's successor, SIREN (Simulation for Improving Response times in Emergency Networks), which includes many enhancements to handle the greater complexities of the Melbourne operations.},\n\taddress = {Boston},\n\tauthor = {S. G. Henderson and A. J. Mason},\n\tbooktitle = {Operations Research and Health Care: A Handbook of Methods and Applications},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {M.~L.\\ Brandeau and F.\\ Sainfort and W. P.\\ Pierskalla},\n\tpages = {77--102},\n\tpublisher = {Kluwer Academic},\n\ttitle = {Ambulance service planning: simulation and data visualization},\n\turl_paper = {pubs/HendersonMasonChap.pdf},\n\tyear = 2004}\n\n
\n
\n\n\n
\n The ambulance-planning problem includes operational decisions such as choice of dispatching policy, strategic decisions such as where ambulances should be stationed and at what times they should operate, and tactical decisions such as station location selection. Any solution to this problem requires careful balancing of political, economic and medical objectives. Quantitative decision processes are becoming increasingly important in providing public accountability for the resource decisions that have to be made. We discuss a simulation and analysis software tool `BARTSIM' that was developed as a decision support tool for use within the St. John Ambulance Service (Auckland Region) in New Zealand (St. Johns). The novel features incorporated within this study include - the use of a detailed time-varying travel model for modelling travel times in the simulation, - methods for reducing the computational overhead associated with computing time-dependent shortest paths in the travel model, - the direct reuse of real data as recorded in a database (trace-driven simulation), and - the development of a geographic information sub-system (GIS) within BARTSIM that provides spatial visualisation of both historical data and the results of what-if simulations. Our experience with St. Johns, and discussions with emergency operators in Australia, North America, and Europe, suggest that emergency services do not have good tools to support their operations management at all levels (operational, strategic and tactical). Our experience has shown that a customized system such as BARTSIM can successfully combine GIS and simulation approaches to provide a quantitative decision support tool highly valued by management. Further evidence of the value of our system is provided by the recent selection of BARTSIM by the Metropolitan Ambulance Service for simulation of their operations in Melbourne, Australia. 
This work has led to the development of BARTSIM's successor, SIREN (Simulation for Improving Response times in Emergency Networks), which includes many enhancements to handle the greater complexities of the Melbourne operations.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive simulation using perfect control variates.\n \n \n \n \n\n\n \n Henderson, S. G.; and Simon, B.\n\n\n \n\n\n\n Journal of Applied Probability, 41: 859–876. 2004.\n \n\n\n\n
\n\n\n\n \n \n \"Adaptive paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{hensim04,\n\tabstract = {We introduce adaptive-simulation schemes for estimating performance measures for stochastic systems based on the method of control variates. We consider several possible methods for adaptively tuning the control-variate estimators, and describe their asymptotic properties. Under certain assumptions, including the existence of a ``perfect control variate'', all of the estimators considered converge faster than the canonical rate of $n^{−1/2}$, where $n$ is the simulation runlength. Perfect control variates for a variety of stochastic processes can be constructed from ``approximating martingales.'' We prove a central limit theorem for an adaptive estimator that converges at rate $n^{−1}\\sqrt{\\ln n}$. A similar estimator converges at rate $n^{−1}$. An exponential rate of convergence is also possible under suitable conditions.},\n\tauthor = {Shane G. Henderson and Burt Simon},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Journal of Applied Probability},\n\tpages = {859--876},\n\ttitle = {Adaptive simulation using perfect control variates},\n\turl_paper = {pubs/HendersonSimon.pdf},\n\tvolume = {41},\n\tyear = {2004}}\n\n
\n
\n\n\n
\n We introduce adaptive-simulation schemes for estimating performance measures for stochastic systems based on the method of control variates. We consider several possible methods for adaptively tuning the control-variate estimators, and describe their asymptotic properties. Under certain assumptions, including the existence of a ``perfect control variate'', all of the estimators considered converge faster than the canonical rate of $n^{−1/2}$, where $n$ is the simulation runlength. Perfect control variates for a variety of stochastic processes can be constructed from ``approximating martingales.'' We prove a central limit theorem for an adaptive estimator that converges at rate $n^{−1}\\sqrt{\\ln n}$. A similar estimator converges at rate $n^{−1}$. An exponential rate of convergence is also possible under suitable conditions.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Adaptive control variates.\n \n \n \n \n\n\n \n Kim, S.; and Henderson, S. G.\n\n\n \n\n\n\n In Ingalls, R.; Rossetti, M.; Smith, J.; and Peters, B., editor(s), Proceedings of the 2004 Winter Simulation Conference, pages 621–629, Piscataway NJ, 2004. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Adaptive paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{kimhen04,\n\tabstract = {Adaptive Monte Carlo methods are specialized Monte Carlo simulation techniques where the methods are adaptively tuned as the simulation progresses. The primary focus of such techniques has been in adaptively tuning importance sampling distributions to reduce the variance of an estimator. We instead focus on adaptive control variate schemes, developing asymptotic theory for the performance of two adaptive control variate estimators. The first estimator is based on a stochastic approximation scheme for identifying the optimal choice of control variate. It is easily implemented, but its performance is sensitive to certain tuning parameters, the selection of which is nontrivial. The second estimator uses a sample average approximation approach. It has the advantage that it does not require any tuning parameters, but it can be computationally expensive and requires the availability of nonlinear optimization software.\n},\n\taddress = {Piscataway NJ},\n\tauthor = {Sujin Kim and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2004 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {R. Ingalls and M. Rossetti and J. Smith and B. Peters},\n\torganization = {IEEE},\n\tpages = {621--629},\n\ttitle = {Adaptive control variates},\n\turl_paper = {pubs/AdaptiveWSC04.pdf},\n\tyear = 2004}\n\n
\n
\n\n\n
\n Adaptive Monte Carlo methods are specialized Monte Carlo simulation techniques where the methods are adaptively tuned as the simulation progresses. The primary focus of such techniques has been in adaptively tuning importance sampling distributions to reduce the variance of an estimator. We instead focus on adaptive control variate schemes, developing asymptotic theory for the performance of two adaptive control variate estimators. The first estimator is based on a stochastic approximation scheme for identifying the optimal choice of control variate. It is easily implemented, but its performance is sensitive to certain tuning parameters, the selection of which is nontrivial. The second estimator uses a sample average approximation approach. It has the advantage that it does not require any tuning parameters, but it can be computationally expensive and requires the availability of nonlinear optimization software. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Convergence in probability of compressed annealing.\n \n \n \n \n\n\n \n Ohlmann, J.; Bean, J.; and Henderson, S.\n\n\n \n\n\n\n Mathematics of Operations Research, 29: 837–860. 2004.\n \n\n\n\n
\n\n\n\n \n \n \"Convergence paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{ohlbeahen04,\n\tabstract = {We consider combinatorial optimization problems for which the formation of a neighborhood structure of feasible solutions is impeded by a set of constraints. Neighborhoods are recovered by relaxing the complicating constraints into the objective function within a penalty term. We examine a heuristic called compressed annealing that integrates a variable penalty multiplier approach within the framework of simulated annealing. We refer to the value of the penalty multiplier as ``pressure.'' We analyze the behavior of compressed annealing by exploring the interaction between temperature (which controls the ability of compressed annealing to climb hills) and pressure (which controls the height of the hills). We develop a necessary and sufficient condition on the joint cooling and compression schedules for compressed annealing to converge in probability to the set of global minima. Our work generalizes the results of Hajek (1988) in the sense that when there are no relaxed constraints, our results reduce to his.},\n\tauthor = {J.~W.\\ Ohlmann and J.~C.\\ Bean and S.~G.\\ Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Mathematics of Operations Research},\n\tpages = {837--860},\n\ttitle = {Convergence in probability of compressed annealing},\n\turl_paper = {pubs/CompressedAnnealing.pdf},\n\tvolume = {29},\n\tyear = {2004}}\n\n
\n
\n\n\n
\n We consider combinatorial optimization problems for which the formation of a neighborhood structure of feasible solutions is impeded by a set of constraints. Neighborhoods are recovered by relaxing the complicating constraints into the objective function within a penalty term. We examine a heuristic called compressed annealing that integrates a variable penalty multiplier approach within the framework of simulated annealing. We refer to the value of the penalty multiplier as ``pressure.'' We analyze the behavior of compressed annealing by exploring the interaction between temperature (which controls the ability of compressed annealing to climb hills) and pressure (which controls the height of the hills). We develop a necessary and sufficient condition on the joint cooling and compression schedules for compressed annealing to converge in probability to the set of global minima. Our work generalizes the results of Hajek (1988) in the sense that when there are no relaxed constraints, our results reduce to his.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2003\n \n \n (11)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Using simulation to approximate subgradients of convex performance measures in service systems.\n \n \n \n \n\n\n \n Atlason, J.; Epelman, M. A.; and Henderson, S. G.\n\n\n \n\n\n\n In Chick, S. E.; Sánchez, P. J.; Morrice, D. J.; and Ferrin, D., editor(s), Proceedings of the 2003 Winter Simulation Conference, volume 2, pages 1824–1832, Piscataway, NJ, 2003. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Using paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{atlepehen03b,\n\tabstract = {We study the problem of approximating a subgradient of a convex (or concave) discrete function that is evaluated via simulation. This problem arises, for instance, in optimization problems such as finding the minimal cost staff schedule in a call center subject to a service level constraint. There, subgradient information can be used to significantly reduce the search space. The problem of approximating subgradients is closely related to the one of approximating gradients and we suggest and compare how three existing methods for computing gradients via simulation, i.e., finite differences, the likelihood ratio method and infinitesimal perturbation analysis, can be applied to approximate subgradients when the variables are discrete. We provide a computational study to highlight the properties of each approach.},\n\taddress = {Piscataway, NJ},\n\tauthor = {J. Atlason and M. A. Epelman and S. G. Henderson},\n\tbooktitle = {Proceedings of the 2003 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {S. E. Chick and P. J. S\\'{a}nchez and D. J. Morrice and D. Ferrin},\n\torganization = {IEEE},\n\tpages = {1824--1832},\n\ttitle = {Using simulation to approximate subgradients of convex performance measures in service systems},\n\turl_paper = {pubs/atlepehen03wsc.pdf},\n\tvolume = {2},\n\tyear = 2003}\n\n
\n
\n\n\n
\n We study the problem of approximating a subgradient of a convex (or concave) discrete function that is evaluated via simulation. This problem arises, for instance, in optimization problems such as finding the minimal cost staff schedule in a call center subject to a service level constraint. There, subgradient information can be used to significantly reduce the search space. The problem of approximating subgradients is closely related to the one of approximating gradients and we suggest and compare how three existing methods for computing gradients via simulation, i.e., finite differences, the likelihood ratio method and infinitesimal perturbation analysis, can be applied to approximate subgradients when the variables are discrete. We provide a computational study to highlight the properties of each approach.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Convergence of simulation-based policy iteration.\n \n \n \n \n\n\n \n Cooper, W.; Henderson, S.; and Lewis, M.\n\n\n \n\n\n\n Probability in the Engineering and Informational Sciences, 17: 213–234. 2003.\n \n\n\n\n
\n\n\n\n \n \n \"Convergence paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{coohenlew02,\n\tabstract = {Simulation-based policy iteration (SBPI) is a modification of the policy iteration algorithm for computing optimal policies for Markov decision processes. At each iteration, rather than solving the average evaluation equations, SBPI employs simulation to estimate a solution to these equations. For recurrent average-reward Markov decision processes with finite state and action spaces, we provide easily-verifiable conditions that ensure that simulation-based policy iteration almost-surely eventually never leaves the set of optimal decision rules. We analyze three simulation estimators for solutions to the average evaluation equations. Using our general results, we derive simple conditions on the simulation runlengths that guarantee the almost-sure convergence of the algorithm.},\n\tauthor = {W.~L.\\ Cooper and S.~G.\\ Henderson and M.~E.\\ Lewis},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Probability in the Engineering and Informational Sciences},\n\tpages = {213--234},\n\ttitle = {Convergence of simulation-based policy iteration},\n\turl_paper = {pubs/csbpi2.pdf},\n\tvolume = {17},\n\tyear = {2003}}\n\n
\n
\n\n\n
\n Simulation-based policy iteration (SBPI) is a modification of the policy iteration algorithm for computing optimal policies for Markov decision processes. At each iteration, rather than solving the average evaluation equations, SBPI employs simulation to estimate a solution to these equations. For recurrent average-reward Markov decision processes with finite state and action spaces, we provide easily-verifiable conditions that ensure that simulation-based policy iteration almost-surely eventually never leaves the set of optimal decision rules. We analyze three simulation estimators for solutions to the average evaluation equations. Using our general results, we derive simple conditions on the simulation runlengths that guarantee the almost-sure convergence of the algorithm.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Fairness and efficiency in web server protocols.\n \n \n \n \n\n\n \n Friedman, E. J.; and Henderson, S. G.\n\n\n \n\n\n\n In Proceedings of the 2003 ACM SIGMETRICS International Conference on Measurement and Modeling of Computer Systems, pages 229–237, 2003. ACM Press\n \n\n\n\n
\n\n\n\n \n \n \"Fairness paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{frihen03,\n\tabstract = {We consider the problem of designing a preemptive protocol that is both fair and efficient when one is only concerned with the sojourn time of the job and not intermediate results. Our Fair Sojourn Protocol (FSP) is both efficient, in a strong sense (similar to the shortest remaining processing time protocol: SRPT), and fair, in the sense of guaranteeing that it weakly outperforms processor sharing (PS) for every job on any sample path.\nOur primary motivation is web serving in which the standard protocol is PS, while recent work proposes using SRPT or variants. Our work suggests both a framework in which to evaluate proposed protocols and an attractive new protocol, FSP.},\n\tannote = {pubs/FSPSigmetrics.pdf},\n\tauthor = {Eric J. Friedman and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2003 ACM SIGMETRICS International Conference on Measurement and Modeling of Computer Systems},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2018-07-16 17:39:04 +0000},\n\tdoi = {http://doi.acm.org/10.1145/781027.781056},\n\tisbn = {1-518813-664-1},\n\tlocation = {San Diego, CA, USA},\n\tpages = {229--237},\n\tpublisher = {ACM Press},\n\ttitle = {Fairness and efficiency in web server protocols},\n\turl_paper = {https://dl.acm.org/authorize?N654070},\n\tyear = {2003},\n\tbdsk-url-1 = {http://doi.acm.org/10.1145/781027.781056}}\n\n
\n
\n\n\n
\n We consider the problem of designing a preemptive protocol that is both fair and efficient when one is only concerned with the sojourn time of the job and not intermediate results. Our Fair Sojourn Protocol (FSP) is both efficient, in a strong sense (similar to the shortest remaining processing time protocol: SRPT), and fair, in the sense of guaranteeing that it weakly outperforms processor sharing (PS) for every job on any sample path. Our primary motivation is web serving in which the standard protocol is PS, while recent work proposes using SRPT or variants. Our work suggests both a framework in which to evaluate proposed protocols and an attractive new protocol, FSP.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Behaviour of the NORTA method for correlated random vector generation as the dimension increases.\n \n \n \n \n\n\n \n Ghosh, S.; and Henderson, S.\n\n\n \n\n\n\n ACM Transactions on Modeling and Computer Simulation, 13: 276–294. 2003.\n \n\n\n\n
\n\n\n\n \n \n \"Behaviour corrigendumone\n  \n \n \n \"Behaviour corrigendumtwo\n  \n \n \n \"Behaviour paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{ghohen03,\n\tabstract = {The NORTA method is a fast general-purpose method for generating samples of a random vector with given marginal distributions and given correlation matrix. It is known that there ex- ist marginal distributions and correlation matrices that the NORTA method cannot match, even though a random vector with the prescribed qualities exists. We investigate this problem as the di- mension of the random vector increases. Simulation results show that the problem rapidly becomes acute, in the sense that NORTA fails to work with an increasingly large proportion of correlation matrices. Simulation results also show that if one is willing to settle for a correlation matrix that is ``close'' to the desired one, then NORTA performs well with increasing dimension. As part of our analysis, we develop a method for sampling correlation matrices uniformly (in a certain precise sense) from the set of all such matrices. This procedure can be used more generally for sampling uniformly from the space of all symmetric positive definite matrices with diagonal elements fixed at positive values.},\n\tannote = {pubs/NORTAHighD.pdf},\n\tauthor = {Ghosh, S. and Henderson, S.~G.},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2018-07-16 17:38:20 +0000},\n\tjournal = {ACM Transactions on Modeling and Computer Simulation},\n\tpages = {276--294},\n\ttitle = {Behaviour of the {NORTA} method for correlated random vector generation as the dimension increases},\n\turl_corrigendumone = {pubs/corrig1.pdf},\n\turl_corrigendumtwo = {pubs/corrig2.pdf},\n\turl_paper = {https://dl.acm.org/authorize?N654079},\n\tvolume = {13},\n\tyear = 2003}\n\n
\n
\n\n\n
\n The NORTA method is a fast general-purpose method for generating samples of a random vector with given marginal distributions and given correlation matrix. It is known that there ex- ist marginal distributions and correlation matrices that the NORTA method cannot match, even though a random vector with the prescribed qualities exists. We investigate this problem as the di- mension of the random vector increases. Simulation results show that the problem rapidly becomes acute, in the sense that NORTA fails to work with an increasingly large proportion of correlation matrices. Simulation results also show that if one is willing to settle for a correlation matrix that is ``close'' to the desired one, then NORTA performs well with increasing dimension. As part of our analysis, we develop a method for sampling correlation matrices uniformly (in a certain precise sense) from the set of all such matrices. This procedure can be used more generally for sampling uniformly from the space of all symmetric positive definite matrices with diagonal elements fixed at positive values.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimation for nonhomogeneous Poisson processes from aggregated data.\n \n \n \n \n\n\n \n Henderson, S. G.\n\n\n \n\n\n\n Operations Research Letters, 31: 375–382. 2003.\n \n\n\n\n
\n\n\n\n \n \n \"Estimation paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 3 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{hen03,\n\tabstract = {A well-known heuristic for estimating the rate function or cumulative rate function of a nonhomogeneous Poisson process assumes that the rate function is piecewise constant on a set of data-independent intervals. We investigate the asymptotic (as the amount of data grows) behavior of this estimator in the case of equal interval widths, and show that it can be transformed into a consistent estimator if the interval lengths shrink at an appropriate rate as the amount of data grows.},\n\tauthor = {Shane G.\\ Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tdoi = {10.1016/S0167-6377(03)00027-0},\n\tjournal = {Operations Research Letters},\n\tpages = {375--382},\n\ttitle = {Estimation for nonhomogeneous {P}oisson processes from aggregated data},\n\turl_paper = {pubs/NHPPORL.pdf},\n\tvolume = {31},\n\tyear = {2003},\n\tbdsk-url-1 = {http://dx.doi.org/10.1016/S0167-6377(03)00027-0}}\n\n
\n
\n\n\n
\n A well-known heuristic for estimating the rate function or cumulative rate function of a nonhomogeneous Poisson process assumes that the rate function is piecewise constant on a set of data-independent intervals. We investigate the asymptotic (as the amount of data grows) behavior of this estimator in the case of equal interval widths, and show that it can be transformed into a consistent estimator if the interval lengths shrink at an appropriate rate as the amount of data grows.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Input model uncertainty: why do we care and what should we do about it?.\n \n \n \n \n\n\n \n Henderson, S. G.\n\n\n \n\n\n\n In Chick, S. E.; Sánchez, P. J.; Morrice, D. J.; and Ferrin, D., editor(s), Proceedings of the 2003 Winter Simulation Conference, pages 90–100, Piscataway, NJ, 2003. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Input paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hen03wsc,\n\tabstract = {An input model is a collection of distributions together with any associated parameters that are used as primitive inputs in a simulation model. Input model uncertainty arises when one is not completely certain what distributions and/or parameters to use. This tutorial attempts to provide a sense of why one should consider input uncertainty and what methods can be used to deal with it.},\n\taddress = {Piscataway, NJ},\n\tauthor = {Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2003 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 17:53:47 +0000},\n\teditor = {Stephen E. Chick and Paul J. S\\'{a}nchez and Douglas J. Morrice and David Ferrin},\n\torganization = {IEEE},\n\tpages = {90--100},\n\ttitle = {Input model uncertainty: why do we care and what should we do about it?},\n\turl_paper = {pubs/hen03wsc.pdf},\n\tyear = 2003}\n\n
\n
\n\n\n
\n An input model is a collection of distributions together with any associated parameters that are used as primitive inputs in a simulation model. Input model uncertainty arises when one is not completely certain what distributions and/or parameters to use. This tutorial attempts to provide a sense of why one should consider input uncertainty and what methods can be used to deal with it.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Nonexistence of a class of variate generation schemes.\n \n \n \n \n\n\n \n Henderson, S.; and Glynn, P.\n\n\n \n\n\n\n Operations Research Letters, 31: 83–89. 2003.\n \n\n\n\n
\n\n\n\n \n \n \"Nonexistence paper\n  \n \n\n \n \n doi\n  \n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{hengly03,\n\tabstract = {Motivated by a problem arising in the regenerative analysis of discrete-event system simulation, we ask whether a certain class of random variate generation schemes exists or not. Under very reasonable conditions, we prove that such variate generation schemes do not exist. The implications of this result for regenerative steady-state simulation of discrete-event systems are discussed.},\n\tauthor = {Henderson, S.~G. and Glynn, P.~W.},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tdoi = {10.1016/S0167-6377(02)00217-1},\n\tjournal = {Operations Research Letters},\n\tpages = {83--89},\n\ttitle = {Nonexistence of a class of variate generation schemes},\n\turl_paper = {pubs/NonexistenceORL.pdf},\n\tvolume = {31},\n\tyear = 2003,\n\tbdsk-url-1 = {http://dx.doi.org/10.1016/S0167-6377(02)00217-1}}\n\n
\n
\n\n\n
\n Motivated by a problem arising in the regenerative analysis of discrete-event system simulation, we ask whether a certain class of random variate generation schemes exists or not. Under very reasonable conditions, we prove that such variate generation schemes do not exist. The implications of this result for regenerative steady-state simulation of discrete-event systems are discussed.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Performance Evaluation and Policy Selection in Multiclass Networks.\n \n \n \n \n\n\n \n Henderson, S.; Meyn, S. P.; and Tadić, V.\n\n\n \n\n\n\n Discrete Event Dynamic Systems, 13: 149-189. 2003.\n Special issue on learning and optimization methods\n\n\n\n
\n\n\n\n \n \n \"Performance paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{henmeytad03,\n\tabstract = {This paper concerns modelling and policy synthesis for regulation of multiclass queueing networks.\n\t\nA 2-parameter network model is introduced to allow independent modelling of variability and mean processing-rates, while maintaining simplicity of the model. Policy synthesis is based on consideration of more tractable workload models, and then translating a policy from this abstraction to the discrete network of interest.\n\nTranslation is made possible through the use of safety-stocks that maintain feasibility of workload trajectories. This is a well-known approach in the queueing theory literature, and may be viewed as a generic approach to avoid deadlock in a discrete-event dynamical system.\n\nSimulation is used to evaluate a given policy, and to tune safety-stock levels. These simulations are accelerated through a variance reduction technique that incorporates stochastic approximation to tune the variance reduction. The search for appropriate safety-stock levels is coordinated through a cutting plane algorithm.},\n\tauthor = {Henderson, S.~G. and Meyn, S. P. and Tadi\\'{c}, V.},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Discrete Event Dynamic Systems},\n\tnote = {Special issue on learning and optimization methods},\n\tpages = {149-189},\n\ttitle = {Performance Evaluation and Policy Selection in Multiclass Networks},\n\turl_paper = {pubs/DEDSV4.pdf},\n\tvolume = {13},\n\tyear = {2003}}\n\n
\n
\n\n\n
\n This paper concerns modelling and policy synthesis for regulation of multiclass queueing networks. A 2-parameter network model is introduced to allow independent modelling of variability and mean processing-rates, while maintaining simplicity of the model. Policy synthesis is based on consideration of more tractable workload models, and then translating a policy from this abstraction to the discrete network of interest. Translation is made possible through the use of safety-stocks that maintain feasibility of workload trajectories. This is a well-known approach in the queueing theory literature, and may be viewed as a generic approach to avoid deadlock in a discrete-event dynamical system. Simulation is used to evaluate a given policy, and to tune safety-stock levels. These simulations are accelerated through a variance reduction technique that incorporates stochastic approximation to tune the variance reduction. The search for appropriate safety-stock levels is coordinated through a cutting plane algorithm.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Development of a simulation package for modelling emergency medical service operations.\n \n \n \n \n\n\n \n Mason, A. J.; Day, P.; Henderson, S. G.; Meyer, J.; Snowdon, J.; and Waite, J.\n\n\n \n\n\n\n In Proceedings of the 8th Annual International Conference on Industrial Engineering Theory, Applications and Practice, 2003. \n \n\n\n\n
\n\n\n\n \n \n \"Development paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{masetal03,\n\tabstract = {As health care demands increase, we need to find better ways to manage the scarce resources that are available. A new simulation package, named Siren, has been developed to help meet this need in the Emergency Medical Services area. We discuss the development of Siren, illustrate some of its capabilities, and present some of our experiences in using Siren to model ambulance services both in Auckland, New Zealand and in Melbourne, Australia. The Siren project is a collaborative exercise that includes the University of Auckland in New Zealand, Shane Henderson from Cornell University, and the commercial partner Optimal Decision Technologies Ltd.},\n\tauthor = {A. J. Mason and P. Day and S. G. Henderson and J. Meyer and J. Snowdon and J. Waite},\n\tbooktitle = {Proceedings of the 8th Annual International Conference on Industrial Engineering Theory, Applications and Practice},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\ttitle = {Development of a simulation package for modelling emergency medical service operations},\n\turl_paper = {pubs/IJIESiren.pdf},\n\tyear = 2003}\n\n
\n
\n\n\n
\n As health care demands increase, we need to find better ways to manage the scarce resources that are available. A new simulation package, named Siren, has been developed to help meet this need in the Emergency Medical Services area. We discuss the development of Siren, illustrate some of its capabilities, and present some of our experiences in using Siren to model ambulance services both in Auckland, New Zealand and in Melbourne, Australia. The Siren project is a collaborative exercise that includes the University of Auckland in New Zealand, Shane Henderson from Cornell University, and the commercial partner Optimal Decision Technologies Ltd.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A simulation model for predicting yacht match race outcomes.\n \n \n \n \n\n\n \n Philpott, A.; Henderson, S.; and Teirney, D.\n\n\n \n\n\n\n Operations Research, 52: 1–16. 2003.\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{phihentie03,\n\tabstract = {We outline the development of a model for predicting the outcome of a yacht match race between two competing designs. The model is a fixed-time-increment simulation that accounts for the dynamic performance of each yacht. The wind speed and direction are modelled using hidden Markov chain models. Each yacht is assumed to follow a fixed sailing strategy determined by a set of simple decision rules. The simulation models both yachts simultaneously and accounts for interactions between them - for example, when they cross. The model is illustrated by applying it to International America's Cup Class designs.},\n\tauthor = {A.~B.\\ Philpott and S.~G.\\ Henderson and D. Teirney},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Operations Research},\n\tpages = {1--16},\n\ttitle = {A simulation model for predicting yacht match race outcomes},\n\turl_paper = {pubs/AmCup.pdf},\n\tvolume = {52},\n\tyear = {2003}}\n\n
\n
\n\n\n
\n We outline the development of a model for predicting the outcome of a yacht match race between two competing designs. The model is a fixed-time-increment simulation that accounts for the dynamic performance of each yacht. The wind speed and direction are modelled using hidden Markov chain models. Each yacht is assumed to follow a fixed sailing strategy determined by a set of simple decision rules. The simulation models both yachts simultaneously and accounts for interactions between them - for example, when they cross. The model is illustrated by applying it to International America's Cup Class designs.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n A kernel approach to estimating the density of a conditional expectation.\n \n \n \n \n\n\n \n Steckley, S. G.; and Henderson, S. G.\n\n\n \n\n\n\n In Chick, S. E.; Sánchez, P. J.; Morrice, D. J.; and Ferrin, D., editor(s), Proceedings of the 2003 Winter Simulation Conference, pages 383–391, Piscataway, NJ, 2003. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"A paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{stehen03,\n\tabstract = {Given uncertainty in the input model and parameters of a simulation study, the goal of the simulation study often becomes the estimation of a conditional expectation. The conditional expectation is expected performance conditional on the selected model and parameters. The distribution of this conditional expectation describes precisely, and concisely, the impact of input uncertainty on performance prediction. In this paper we estimate the density of a conditional expectation using ideas from the field of kernel density estimation. We present a result on asymptotically optimal rates of convergence and examine a number of numerical examples.},\n\taddress = {Piscataway, NJ},\n\tauthor = {Samuel G. Steckley and Shane G. Henderson},\n\tbooktitle = {Proceedings of the 2003 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {Stephen E. Chick and Paul J. S\\'{a}nchez and Douglas J. Morrice and David Ferrin},\n\torganization = {IEEE},\n\tpages = {383--391},\n\ttitle = {A kernel approach to estimating the density of a conditional expectation},\n\turl_paper = {pubs/stehen03wsc.pdf},\n\tyear = 2003}\n\n
\n
\n\n\n
\n Given uncertainty in the input model and parameters of a simulation study, the goal of the simulation study often becomes the estimation of a conditional expectation. The conditional expectation is expected performance conditional on the selected model and parameters. The distribution of this conditional expectation describes precisely, and concisely, the impact of input uncertainty on performance prediction. In this paper we estimate the density of a conditional expectation using ideas from the field of kernel density estimation. We present a result on asymptotically optimal rates of convergence and examine a number of numerical examples.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2002\n \n \n (8)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Book review of J. Thompson. 2000. Simulation: A Modeler's Approach. Wiley, New York.\n \n \n \n\n\n \n Henderson, S. G.\n\n\n \n\n\n\n Journal of the American Statistical Association. 2002.\n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{hen02,\n\tauthor = {S. G. Henderson},\n\tdate-added = {2016-01-10 19:41:31 +0000},\n\tdate-modified = {2016-01-10 19:42:21 +0000},\n\tjournal = {Journal of the American Statistical Association},\n\ttitle = {Book review of J. Thompson. 2000. Simulation: A Modeler's Approach. Wiley, New York.},\n\tyear = {2002}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n SAP: An efficient scheduling protocol for web servers.\n \n \n \n \n\n\n \n Friedman, E. J.; and Henderson, S. G.\n\n\n \n\n\n\n Technical Report ORIE, Cornell University, Ithaca NY, 2002.\n \n\n\n\n
\n\n\n\n \n \n \"SAP: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{frihen02,\n\tabstract = {We present a new scheduling protocol that we call the ``starvation avoidance protocol'' (SAP). SAP is designed for situations in which users primarily care about the completion time of a job, such as web serving. SAP is similar to the shortest remaining processing time (SRPT) protocol in that small jobs typically receive priority. However, unlike SRPT, SAP has a safeguarding feature that ensures that large jobs cannot be starved indefinitely. In particular, SAP guarantees that every job completes at least as early as it would under processor sharing (PS), a common web serving protocol. Indeed, SAP is strictly better than PS for every nonterminal job during a busy period. Thus, SAP provides an attractive protocol for web servers.},\n\taddress = {Ithaca NY},\n\tauthor = {E. J. Friedman and S. G. Henderson},\n\tdate-added = {2016-01-10 19:38:59 +0000},\n\tdate-modified = {2016-01-10 19:40:01 +0000},\n\tinstitution = {ORIE, Cornell University},\n\ttitle = {SAP: An efficient scheduling protocol for web servers},\n\turl_paper = {pubs/sap.pdf},\n\tyear = {2002}}\n\n
\n
\n\n\n
\n We present a new scheduling protocol that we call the ``starvation avoidance protocol'' (SAP). SAP is designed for situations in which users primarily care about the completion time of a job, such as web serving. SAP is similar to the shortest remaining processing time (SRPT) protocol in that small jobs typically receive priority. However, unlike SRPT, SAP has a safeguarding feature that ensures that large jobs cannot be starved indefinitely. In particular, SAP guarantees that every job completes at least as early as it would under processor sharing (PS), a common web serving protocol. Indeed, SAP is strictly better than PS for every nonterminal job during a busy period. Thus, SAP provides an attractive protocol for web servers.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Combining simulation and cutting plane methods in service systems.\n \n \n \n \n\n\n \n Atlason, J.; Epelman, M.; and Henderson, S.\n\n\n \n\n\n\n In Reinig, P., editor(s), Proceedings of the 2002 National Science Foundation Design, Service and Manufacture Grantees and Research Conference, 2002. \n \n\n\n\n
\n\n\n\n \n \n \"Combining paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{atlepehen02nsf,\n\tabstract = {In this paper we describe a method that combines simulation and cutting plane methods to solve resource allocation and scheduling problems. We solve a relaxed linear (integer) program iteratively and pass the solution of each iteration to a simulation. The results of the simulation are used to generate constraints in the linear (integer) program. We provide conditions under which the solution of the linear (integer) program converges to an optimal solution of the unrelaxed problem. The concavity of the underlying service level function is critical for the method and we present a linear programming method to numerically check the concavity of a function.},\n\tauthor = {J.\\ Atlason and M.~A.\\ Epelman and S.~G.\\ Henderson},\n\tbooktitle = {Proceedings of the 2002 National Science Foundation Design, Service and Manufacture Grantees and Research Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {P.\\ Reinig},\n\ttitle = {Combining simulation and cutting plane methods in service systems},\n\turl_paper = {pubs/NSFCutting.pdf},\n\tyear = 2002}\n\n
\n
\n\n\n
\n In this paper we describe a method that combines simulation and cutting plane methods to solve resource allocation and scheduling problems. We solve a relaxed linear (integer) program iteratively and pass the solution of each iteration to a simulation. The results of the simulation are used to generate constraints in the linear (integer) program. We provide conditions under which the solution of the linear (integer) program converges to an optimal solution of the unrelaxed problem. The concavity of the underlying service level function is critical for the method and we present a linear programming method to numerically check the concavity of a function.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Panel on current issues in simulation input modeling.\n \n \n \n \n\n\n \n Barton, R. R.; Cheng, R. C. H.; Chick, S. E.; Henderson, S. G.; Law, A. M.; Leemis, L. M.; Schmeiser, B. W.; Schruben, L. W.; and Wilson, J. R.\n\n\n \n\n\n\n In Yücesan, E.; Chen, C.; Snowdon, J. L.; and Charnes, J. M., editor(s), Proceedings of the 2002 Winter Simulation Conference, pages 353–369, Piscataway, NJ, 2002. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Panel paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{baretal02,\n\tabstract = {In recent years, substantial progress has been made in the development of powerful new approaches to modeling and generation of the stochastic input processes driving simula- tion models. In this panel discussion, we examine some of the central issues and unresolved problems associated with each of these approaches to simulation input modeling.},\n\taddress = {Piscataway, NJ},\n\tauthor = {Russell R. Barton and Russell C. H. Cheng and Stephen E. Chick and Shane G. Henderson and Averill M. Law and Lawrence M. Leemis and Bruce W. Schmeiser and Lee W. Schruben and James R. Wilson},\n\tbooktitle = {Proceedings of the 2002 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {E. Y{\\"{u}}cesan and C.-H. Chen and J. L. Snowdon and J. M. Charnes},\n\torganization = {IEEE},\n\tpages = {353--369},\n\ttitle = {Panel on current issues in simulation input modeling},\n\turl_paper = {pubs/wilsonj-1i.pdf},\n\tyear = {2002}}\n\n
\n
\n\n\n
\n In recent years, substantial progress has been made in the development of powerful new approaches to modeling and generation of the stochastic input processes driving simulation models. In this panel discussion, we examine some of the central issues and unresolved problems associated with each of these approaches to simulation input modeling.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Chessboard distributions and random vectors with specified marginals and covariance matrix.\n \n \n \n \n\n\n \n Ghosh, S.; and Henderson, S.\n\n\n \n\n\n\n Operations Research, 50(5): 820–834. 2002.\n \n\n\n\n
\n\n\n\n \n \n \"Chessboard onlinecompanion\n  \n \n \n \"Chessboard paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{ghohen02,\n\tabstract = {There is a growing need for the ability to specify and generate correlated random variables as primitive inputs to stochastic models. Motivated by this need, several authors have explored the generation of random vectors with specified marginals, together with a specified covariance matrix, through the use of a transformation of a multivariate normal random vector (the NORTA method).\n\t\nA covariance matrix is said to be feasible for a given set of marginal distributions if a random vector exists with these characteristics. We develop a computational approach for establishing whether a given covariance matrix is feasible for a given set of marginals. The approach is used to rigorously establish that there are sets of marginals with feasible covariance matrix that the NORTA method cannot match. In such cases, we show how to modify the initialization phase of NORTA so that it will exactly match the marginals, and approximately match the desired covariance matrix.\n\nAn important feature of our analysis is that we show that for almost any covariance matrix (in a certain precise sense), our computational procedure either explicitly provides a construction of a random vector with the required properties, or establishes that no such random vector exists.},\n\tauthor = {Ghosh, S. and Henderson, S.~G.},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Operations Research},\n\tnumber = {5},\n\tpages = {820--834},\n\ttitle = {Chessboard distributions and random vectors with specified marginals and covariance matrix},\n\turl_onlinecompanion = {pubs/ChessboardOROC.pdf},\n\turl_paper = {pubs/ChessboardOR.pdf},\n\tvolume = {50},\n\tyear = 2002}\n\n
\n
\n\n\n
\n There is a growing need for the ability to specify and generate correlated random variables as primitive inputs to stochastic models. Motivated by this need, several authors have explored the generation of random vectors with specified marginals, together with a specified covariance matrix, through the use of a transformation of a multivariate normal random vector (the NORTA method). A covariance matrix is said to be feasible for a given set of marginal distributions if a random vector exists with these characteristics. We develop a computational approach for establishing whether a given covariance matrix is feasible for a given set of marginals. The approach is used to rigorously establish that there are sets of marginals with feasible covariance matrix that the NORTA method cannot match. In such cases, we show how to modify the initialization phase of NORTA so that it will exactly match the marginals, and approximately match the desired covariance matrix. An important feature of our analysis is that we show that for almost any covariance matrix (in a certain precise sense), our computational procedure either explicitly provides a construction of a random vector with the required properties, or establishes that no such random vector exists.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Properties of the NORTA method in higher dimensions.\n \n \n \n \n\n\n \n Ghosh, S.; and Henderson, S.\n\n\n \n\n\n\n In Yücesan, E.; Chen, C.; Snowdon, J.; and Charnes, J., editor(s), Proceedings of the 2002 Winter Simulation Conference, pages 263–269, Piscataway NJ, 2002. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Properties paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ghohen02wsc,\n\tabstract = {The NORTA method for multivariate generation is a fast general purpose method for generating samples of a random vector with given marginal distributions and given product-moment or rank correlation matrix. However, this method has been shown to fail to work for some feasible correlation matrices. (A matrix is feasible if there exists a random vector with the given marginal distributions and the matrix as the correlation matrix.) We investigate how this feasibility problem behaves as the dimension of the random vector is increased and find the problem to become acute rapidly. We also find that a modified NORTA procedure, augmented by a semidefinite program (SDP) that aims to generate a correlation matrix close to the desired one, performs well with increasing dimension.},\n\taddress = {Piscataway NJ},\n\tauthor = {Ghosh, S. and Henderson, S.~G.},\n\tbooktitle = {Proceedings of the 2002 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {Y{\\"{u}}cesan,E. and Chen,C.~H. and Snowdon,J.~L. and Charnes, J.~M.},\n\tpages = {263--269},\n\tpublisher = {IEEE},\n\ttitle = {Properties of the {NORTA} method in higher dimensions},\n\turl_paper = {pubs/NORTAProperties.pdf},\n\tyear = 2002}\n\n
\n
\n\n\n
\n The NORTA method for multivariate generation is a fast general purpose method for generating samples of a random vector with given marginal distributions and given product-moment or rank correlation matrix. However, this method has been shown to fail to work for some feasible correlation matrices. (A matrix is feasible if there exists a random vector with the given marginal distributions and the matrix as the correlation matrix.) We investigate how this feasibility problem behaves as the dimension of the random vector is increased and find the problem to become acute rapidly. We also find that a modified NORTA procedure, augmented by a semidefinite program (SDP) that aims to generate a correlation matrix close to the desired one, performs well with increasing dimension.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Approximating martingales for variance reduction in Markov process simulation.\n \n \n \n \n\n\n \n Henderson, S.; and Glynn, P.\n\n\n \n\n\n\n Mathematics of Operations Research, 27: 253–271. 2002.\n \n\n\n\n
\n\n\n\n \n \n \"Approximating paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{hengly02,\n\tabstract = {``Knowledge of either analytical or numerical approximations should enable more efficient simulation estimators to be constructed.'' This principle seems intuitively plausible and certainly attractive, yet no completely satisfactory general methodology has been developed to exploit it. We present a new approach for obtaining variance reduction in Markov process simulation that is applicable to a vast array of different performance measures. The approach relies on the construction of a martingale that is then used as an internal control variate.},\n\tauthor = {Henderson, S.~G. and Glynn, P.~W.},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Mathematics of Operations Research},\n\tpages = {253--271},\n\ttitle = {Approximating martingales for variance reduction in {M}arkov process simulation},\n\turl_paper = {pubs/AMP1.pdf},\n\tvolume = {27},\n\tyear = 2002}\n\n
\n
\n\n\n
\n ``Knowledge of either analytical or numerical approximations should enable more efficient simulation estimators to be constructed.'' This principle seems intuitively plausible and certainly attractive, yet no completely satisfactory general methodology has been developed to exploit it. We present a new approach for obtaining variance reduction in Markov process simulation that is applicable to a vast array of different performance measures. The approach relies on the construction of a martingale that is then used as an internal control variate.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Identifying effective policies for multiclass networks.\n \n \n \n \n\n\n \n Henderson, S.; and Meyn, S.\n\n\n \n\n\n\n In Reinig, P., editor(s), Proceedings of the 2002 National Science Foundation Design, Service and Manufacture Grantees and Research Conference, 2002. \n \n\n\n\n
\n\n\n\n \n \n \"Identifying paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{henmey02nsf,\n\tabstract = {This paper investigates procedures for identifying effective policies for regulation of multiclass queueing networks. Policy synthesis\nis based on consideration of a related ¤uid network, and then translating a policy from this abstraction to the discrete network of interest. Translation is made possible through the use of safety stocks that maintain feasibility of ¤uid trajectories, and prevent starvation of critical resources.\n\nIn several examples it is found that the performance of the system as a function of the safety stock levels is convex, suggesting the use of cutting plane algorithms to identify optimal safety-stock levels. This approach is investigated using a 2-parameter network model that allows independent modelling of variability and mean processing-rates. Algorithms based on simulation of this model are proposed to evaluate and tune safety stock settings.},\n\tauthor = {S.~G.\\ Henderson and S.~P.\\ Meyn},\n\tbooktitle = {Proceedings of the 2002 National Science Foundation Design, Service and Manufacture Grantees and Research Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {P.\\ Reinig},\n\ttitle = {Identifying effective polices for multiclass networks},\n\turl_paper = {pubs/NSFEffective.pdf},\n\tyear = 2002}\n\n
\n
\n\n\n
\n This paper investigates procedures for identifying effective policies for regulation of multiclass queueing networks. Policy synthesis is based on consideration of a related fluid network, and then translating a policy from this abstraction to the discrete network of interest. Translation is made possible through the use of safety stocks that maintain feasibility of fluid trajectories, and prevent starvation of critical resources. In several examples it is found that the performance of the system as a function of the safety stock levels is convex, suggesting the use of cutting plane algorithms to identify optimal safety-stock levels. This approach is investigated using a 2-parameter network model that allows independent modelling of variability and mean processing-rates. Algorithms based on simulation of this model are proposed to evaluate and tune safety stock settings.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2001\n \n \n (5)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Two issues in setting call center staffing levels.\n \n \n \n \n\n\n \n Chen, B. P. K.; and Henderson, S. G.\n\n\n \n\n\n\n Annals of Operations Research, 108: 175–192. 2001.\n \n\n\n\n
\n\n\n\n \n \n \"Two paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{chehen01,\n\tabstract = {Motivated by a problem facing the Police Communication Centre in Auckland, New Zealand, we consider the setting of staffing levels in a call centre with priority customers. The choice of staffing level over any particular time period (e.g., Monday from 8am - 9am) relies on accurate arrival rate information. The usual method for identifying the arrival rate based on historical data can, in some cases, lead to considerable errors in performance estimates for a given staffing level. We explain why, identify three potential causes of the difficulty, and describe a method for detecting and addressing such a problem.},\n\tauthor = {B. P. K. Chen and S. G. Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Annals of Operations Research},\n\tpages = {175--192},\n\ttitle = {Two issues in setting call center staffing levels},\n\turl_paper = {pubs/TwoIssues.pdf},\n\tvolume = {108},\n\tyear = {2001}}\n\n
\n
\n\n\n
\n Motivated by a problem facing the Police Communication Centre in Auckland, New Zealand, we consider the setting of staffing levels in a call centre with priority customers. The choice of staffing level over any particular time period (e.g., Monday from 8am - 9am) relies on accurate arrival rate information. The usual method for identifying the arrival rate based on historical data can, in some cases, lead to considerable errors in performance estimates for a given staffing level. We explain why, identify three potential causes of the difficulty, and describe a method for detecting and addressing such a problem.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Chessboard distributions.\n \n \n \n \n\n\n \n Ghosh, S.; and Henderson, S.\n\n\n \n\n\n\n In Peters, B.; Smith, J.; Medeiros, D.; and Rohrer, M., editor(s), Proceedings of the 2001 Winter Simulation Conference, pages 385–393, Piscataway NJ, 2001. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Chessboard paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{ghohen01wsc,\n\tabstract = {We review chessboard distributions for modeling partially specified finite-dimensional random vectors. Chessboard distributions can match a given set of marginals, a given covariance structure, and various other constraints on the distribution of a random vector. It is necessary to solve a potentially large linear program to set up a chessboard distribution, but random vectors can then be rapidly generated.},\n\taddress = {Piscataway NJ},\n\tauthor = {Ghosh, S. and Henderson, S.~G.},\n\tbooktitle = {Proceedings of the 2001 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {Peters,B.~A. and Smith,J.~S. and Medeiros,D.~J. and Rohrer,M.~W.},\n\tpages = {385--393},\n\tpublisher = {IEEE},\n\ttitle = {Chessboard distributions},\n\turl_paper = {pubs/chessboardwsc01.pdf},\n\tyear = 2001}\n\n
\n
\n\n\n
\n We review chessboard distributions for modeling partially specified finite-dimensional random vectors. Chessboard distributions can match a given set of marginals, a given covariance structure, and various other constraints on the distribution of a random vector. It is necessary to solve a potentially large linear program to set up a chessboard distribution, but random vectors can then be rapidly generated.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Mathematics for simulation.\n \n \n \n \n\n\n \n Henderson, S.\n\n\n \n\n\n\n In Peters, B.; Smith, J.; Medeiros, D.; and Rohrer, M., editor(s), Proceedings of the 2001 Winter Simulation Conference, pages 83–94, Piscataway NJ, 2001. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Mathematics paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hen01,\n\tabstract = {I survey several mathematical techniques and results that are useful in the context of stochastic simulation. The concepts are introduced through the study of a simple model of ambulance operation to ensure clarity, concreteness and cohesion.},\n\taddress = {Piscataway NJ},\n\tauthor = {Henderson, S.~G.},\n\tbooktitle = {Proceedings of the 2001 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {Peters,B.~A. and Smith,J.~S. and Medeiros,D.~J. and Rohrer,M.~W.},\n\torganization = {IEEE},\n\tpages = {83--94},\n\ttitle = {Mathematics for simulation},\n\turl_paper = {pubs/maths4sim01.pdf},\n\tyear = 2001}\n\n
\n
\n\n\n
\n I survey several mathematical techniques and results that are useful in the context of stochastic simulation. The concepts are introduced through the study of a simple model of ambulance operation to ensure clarity, concreteness and cohesion.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Regenerative steady-state simulation of discrete event systems.\n \n \n \n \n\n\n \n Henderson, S. G.; and Glynn, P. W.\n\n\n \n\n\n\n ACM Transactions on Modeling and Computer Simulation, 11: 313–345. 2001.\n \n\n\n\n
\n\n\n\n \n \n \"Regenerative paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n  \n \n 1 download\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{hengly01,\n\tabstract = {The regenerative method possesses certain asymptotic properties that dominate those of other steady-state simulation output analysis methods, such as batch means. Therefore, applying the regenerative method to steady-state discrete-event system simulations is of great interest. In this paper, we survey the state of the art in this area. The main difficulty in applying the regenerative method in our context is perhaps in identifying regenerative cycle boundaries. We examine this issue through the use of the ``smoothness index.'' Regenerative cycles are easily identified in systems with unit smoothness index, but this is typically not the case for systems with nonunit smoothness index. We show that ``most'' (in a certain precise sense) discrete-event simulations will have nonunit smoothness index, and extend the asymptotic theory of regenerative simulation estimators to this context.},\n\tannote = {pubs/RegenDEDSTOMACS.pdf},\n\tauthor = {Shane G.\\ Henderson and Peter W.\\ Glynn},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2018-07-16 17:39:37 +0000},\n\tjournal = {ACM Transactions on Modeling and Computer Simulation},\n\tpages = {313--345},\n\ttitle = {Regenerative steady-state simulation of discrete event systems},\n\turl_paper = {https://dl.acm.org/authorize?N654071},\n\tvolume = 11,\n\tyear = 2001}\n\n
\n
\n\n\n
\n The regenerative method possesses certain asymptotic properties that dominate those of other steady-state simulation output analysis methods, such as batch means. Therefore, applying the regenerative method to steady-state discrete-event system simulations is of great interest. In this paper, we survey the state of the art in this area. The main difficulty in applying the regenerative method in our context is perhaps in identifying regenerative cycle boundaries. We examine this issue through the use of the ``smoothness index.'' Regenerative cycles are easily identified in systems with unit smoothness index, but this is typically not the case for systems with nonunit smoothness index. We show that ``most'' (in a certain precise sense) discrete-event simulations will have nonunit smoothness index, and extend the asymptotic theory of regenerative simulation estimators to this context.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Computing densities for Markov chains via simulation.\n \n \n \n \n\n\n \n Henderson, S.; and Glynn, P.\n\n\n \n\n\n\n Mathematics of Operations Research, 26: 375–400. 2001.\n \n\n\n\n
\n\n\n\n \n \n \"Computing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{hengly01b,\n\tabstract = {We introduce a new class of density estimators, termed look-ahead density estimators, for performance measures associated with a Markov chain. Look-ahead density estimators are given for both transient and steady-state quantities. Look-ahead density estimators converge faster (especially in multidimensional problems) and empirically give visually superior results relative to more standard estimators, such as kernel density estimators. Several numerical examples that demonstrate the potential applicability of look-ahead density estimation are given.},\n\tauthor = {Henderson, S.~G. and Glynn, P.~W.},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Mathematics of Operations Research},\n\tpages = {375--400},\n\ttitle = {Computing densities for {M}arkov chains via simulation},\n\turl_paper = {pubs/ComputeDensities.pdf},\n\tvolume = {26},\n\tyear = 2001}\n\n
\n
\n\n\n
\n We introduce a new class of density estimators, termed look-ahead density estimators, for performance measures associated with a Markov chain. Look-ahead density estimators are given for both transient and steady-state quantities. Look-ahead density estimators converge faster (especially in multidimensional problems) and empirically give visually superior results relative to more standard estimators, such as kernel density estimators. Several numerical examples that demonstrate the potential applicability of look-ahead density estimation are given.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 2000\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n BartSim: A tool for analysing and improving ambulance performance in Auckland, New Zealand.\n \n \n \n \n\n\n \n Henderson, S. G.; and Mason, A. J.\n\n\n \n\n\n\n In Proceedings of the 35th Conference of the Operational Research Society of New Zealand, pages 57–64, 2000. \n \n\n\n\n
\n\n\n\n \n \n \"BartSim: paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{henmas00,\n\tabstract = {The St. John Ambulance Service (Auckland Region) in New Zealand (St Johns) supplies ambulance services to the Crown Health Enterprises. Current and future contracts specify several minimum performance targets that St Johns must achieve. As the city of Auckland grows, roads become congested, and population demographics change. These changes mean that St. Johns faces a very difficult problem: how many ambulances are needed, and where should they be placed in order to meet the service targets efficiently.\n\nA preliminary study using queueing theory suggested that more ambulances were needed. However, the assumptions required in the queueing model were such that a more realistic modelling approach was deemed necessary to verify and refine the queueing model results. We discuss a simulation and analysis software tool BartSim developed by the authors that is currently being used to address the issues St. Johns faces. The results obtained from BartSim will be used to drive changes to the rostering process for staff at St. Johns.},\n\tauthor = {Shane G. Henderson and Andrew J. Mason},\n\tbooktitle = {Proceedings of the 35th Conference of the Operational Research Society of New Zealand},\n\tdate-added = {2016-01-10 19:26:34 +0000},\n\tdate-modified = {2016-01-10 19:27:37 +0000},\n\tpages = {57--64},\n\ttitle = {{BartSim}: A tool for analysing and improving ambulance performance in Auckland, New Zealand},\n\turl_paper = {pubs/BartSim2000.pdf},\n\tyear = {2000}}\n\n
\n
\n\n\n
\n The St. John Ambulance Service (Auckland Region) in New Zealand (St Johns) supplies ambulance services to the Crown Health Enterprises. Current and future contracts specify several minimum performance targets that St Johns must achieve. As the city of Auckland grows, roads become congested, and population demographics change. These changes mean that St. Johns faces a very difficult problem: how many ambulances are needed, and where should they be placed in order to meet the service targets efficiently. A preliminary study using queueing theory suggested that more ambulances were needed. However, the assumptions required in the queueing model were such that a more realistic modelling approach was deemed necessary to verify and refine the queueing model results. We discuss a simulation and analysis software tool BartSim developed by the authors that is currently being used to address the issues St. Johns faces. The results obtained from BartSim will be used to drive changes to the rostering process for staff at St. Johns.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Mathematics for simulation.\n \n \n \n \n\n\n \n Henderson, S.\n\n\n \n\n\n\n In Joines, J.; Barton, R.; Kang, K.; and Fishwick, P., editor(s), Proceedings of the 2000 Winter Simulation Conference, pages 137–146, Piscataway NJ, 2000. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Mathematics paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hen00,\n\tabstract = {I survey several mathematical techniques and results that are useful in the context of stochastic simulation. The concepts are introduced through the study of a simple model of ambulance operation to ensure clarity, concreteness and cohesion.},\n\taddress = {Piscataway NJ},\n\tauthor = {Henderson, S.~G.},\n\tbooktitle = {Proceedings of the 2000 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {Joines,J.~A. and Barton,R.~R. and Kang,K. and Fishwick,P.~A.},\n\torganization = {IEEE},\n\tpages = {137--146},\n\ttitle = {Mathematics for simulation},\n\turl_paper = {pubs/maths4sim.pdf},\n\tyear = 2000}\n\n
\n
\n\n\n
\n I survey several mathematical techniques and results that are useful in the context of stochastic simulation. The concepts are introduced through the study of a simple model of ambulance operation to ensure clarity, concreteness and cohesion.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Generating ``dependent'' quasi-random numbers.\n \n \n \n \n\n\n \n Henderson, S.; Chiera, B.; and Cooke, R.\n\n\n \n\n\n\n In Joines, J.; Barton, R.; Kang, K.; and Fishwick, P., editor(s), Proceedings of the 2000 Winter Simulation Conference, pages 527–536, Piscataway NJ, 2000. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Generating paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{henchicoo00wsc,\n\tabstract = {Under certain conditions on the integrand, quasi-Monte Carlo methods for estimating integrals (expectations) converge faster asymptotically than Monte Carlo methods. Motivated by this result we consider the generation of quasi-random vectors with given marginals and given correlation matrix. We extend the ``Normal To Anything'' (NORTA) method, introduced by Cario and Nelson, to this context, and term the extension the ``Quasi-Random to Anything'' (QUARTA) method.},\n\taddress = {Piscataway NJ},\n\tauthor = {Henderson,S.~G. and Chiera,B.~A. and Cooke,R.~M.},\n\tbooktitle = {Proceedings of the 2000 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {Joines,J.~A. and Barton,R.~R. and Kang,K. and Fishwick,P.~A.},\n\tpages = {527--536},\n\tpublisher = {IEEE},\n\ttitle = {Generating ``dependent'' quasi-random numbers},\n\turl_paper = {pubs/depqrn.pdf},\n\tyear = 2000}\n\n
\n
\n\n\n
\n Under certain conditions on the integrand, quasi-Monte Carlo methods for estimating integrals (expectations) converge faster asymptotically than Monte Carlo methods. Motivated by this result we consider the generation of quasi-random vectors with given marginals and given correlation matrix. We extend the ``Normal To Anything'' (NORTA) method, introduced by Cario and Nelson, to this context, and term the extension the ``Quasi-Random to Anything'' (QUARTA) method.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Perfect simulation for Markov chains arising from discrete-event simulation.\n \n \n \n \n\n\n \n Henderson, S.; and Tweedie, R.\n\n\n \n\n\n\n In Sreenivas, R.; and Jones, D., editor(s), Proceedings of the 38th Annual Allerton Conference on Communication, Control, and Computing, pages 1125–1134, Urbana-Champaign, Illinois, 2000. University of Illinois\n \n\n\n\n
\n\n\n\n \n \n \"Perfect paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hentwe00,\n\tabstract = {Virtually any discrete-event simulation can be rigorously defined as a Markov chain evolving on a general state space, and under appropriate conditions, the chain has a unique stationary probability distribution. Many steady-state performance measures can be expressed in terms of the stationary probability distribution of the chain.\n\t\nWe would like to apply ``coupling from the past'' algorithms to obtain samples from the stationary probability distribution of such chains. Unfortunately, the structural properties of the chains arising from discrete-event simulations preclude the immediate application of current coupling from the past algorithms. We describe why this is the case, and extend a class of coupling from the past algorithms so that they may be applied in this setting.},\n\taddress = {Urbana-Champaign, Illinois},\n\tauthor = {S.~G.\\ Henderson and R.~L.\\ Tweedie},\n\tbooktitle = {Proceedings of the 38th Annual Allerton Conference on Communication, Control, and Computing},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {R.~S.\\ Sreenivas and D.~L.\\ Jones},\n\torganization = {University of Illinois},\n\tpages = {1125--1134},\n\ttitle = {Perfect simulation for {M}arkov chains arising from discrete-event simulation},\n\turl_paper = {pubs/HendersonTweedie.pdf},\n\tyear = 2000}\n\n
\n
\n\n\n
\n Virtually any discrete-event simulation can be rigorously defined as a Markov chain evolving on a general state space, and under appropriate conditions, the chain has a unique stationary probability distribution. Many steady-state performance measures can be expressed in terms of the stationary probability distribution of the chain. We would like to apply ``coupling from the past'' algorithms to obtain samples from the stationary probability distribution of such chains. Unfortunately, the structural properties of the chains arising from discrete-event simulations preclude the immediate application of current coupling from the past algorithms. We describe why this is the case, and extend a class of coupling from the past algorithms so that they may be applied in this setting.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1999\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Derandomizing variance estimators.\n \n \n \n \n\n\n \n Henderson, S.; and Glynn, P.\n\n\n \n\n\n\n Operations Research, 47: 907–916. 1999.\n \n\n\n\n
\n\n\n\n \n \n \"Derandomizing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@article{hengly99derandom,\n\tabstract = {One may consider a discrete-event simulation as a Markov chain evolving on a suitably rich state space. One way that regenerative cycles may be constructed for general state-space Markov chains is to generate auxiliary coin- flip random variables at each transition, with a regeneration occurring if the coin-flip results in a success. The regenerative cycles are therefore randomized with respect to the sequence of states visited by the Markov chain. The point estimator for a steady-state performance measure does not depend on the cycle structure of the chain, but the variance estimator (that defines the width of a confidence interval for the performance measure) does. This implies that the variance estimator is randomized with respect to the visited states. We show how to ``derandomize'' the variance estimator through the use of conditioning. A new variance estimator is obtained that is consistent, and has lower variance than the standard estimator.},\n\tauthor = {Henderson, S.~G. and Glynn, P.~W.},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tjournal = {Operations Research},\n\tpages = {907--916},\n\ttitle = {Derandomizing variance estimators},\n\turl_paper = {pubs/Derand.pdf},\n\tvolume = 47,\n\tyear = 1999}\n\n
\n
\n\n\n
\n One may consider a discrete-event simulation as a Markov chain evolving on a suitably rich state space. One way that regenerative cycles may be constructed for general state-space Markov chains is to generate auxiliary coin- flip random variables at each transition, with a regeneration occurring if the coin-flip results in a success. The regenerative cycles are therefore randomized with respect to the sequence of states visited by the Markov chain. The point estimator for a steady-state performance measure does not depend on the cycle structure of the chain, but the variance estimator (that defines the width of a confidence interval for the performance measure) does. This implies that the variance estimator is randomized with respect to the visited states. We show how to ``derandomize'' the variance estimator through the use of conditioning. A new variance estimator is obtained that is consistent, and has lower variance than the standard estimator.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Can the regenerative method be applied to discrete-event simulation?.\n \n \n \n \n\n\n \n Henderson, S.; and Glynn, P.\n\n\n \n\n\n\n In Farrington, P.; Black Nembhard, H.; Sturrock, D.; and Evans, G., editor(s), Proceedings of the 1999 Winter Simulation Conference, pages 367–373, Piscataway NJ, 1999. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Can paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{hengly99wsc,\n\tabstract = {The regenerative method enjoys asymptotic properties that make it a highly desirable approach for steady-state simulation output analysis. It has been shown that virtually all discrete-event simulations are regenerative. However, the method is not in widespread use, perhaps primarily because of a difficulty in identifying regeneration times.\n \nOur goal in this paper is to highlight the essence of the difficulty in identifying regeneration times in discrete-event simulations. We focus on a very simple example of a discrete-event simulation, and explore its regenerative properties.\n \nWe show that for our example, it is possible to explicitly determine regeneration times. The ideas that are used to establish this fact might prove useful in identifying regeneration times in more general discrete-event system simulations.},\n\taddress = {Piscataway NJ},\n\tannote = {pubs/DEDSWSC99.pdf},\n\tauthor = {Henderson, S.~G. and Glynn, P.~W.},\n\tbooktitle = {Proceedings of the 1999 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2018-07-16 17:40:08 +0000},\n\teditor = {Farrington, P.~A. and Black Nembhard, H. and Sturrock, D.~T. and Evans, G.~W.},\n\torganization = {IEEE},\n\tpages = {367--373},\n\ttitle = {Can the regenerative method be applied to discrete-event simulation?},\n\turl_paper = {https://dl.acm.org/authorize?N654073},\n\tyear = 1999}\n\n
\n
\n\n\n
\n The regenerative method enjoys asymptotic properties that make it a highly desirable approach for steady-state simulation output analysis. It has been shown that virtually all discrete-event simulations are regenerative. However, the method is not in widespread use, perhaps primarily because of a difficulty in identifying regeneration times. Our goal in this paper is to highlight the essence of the difficulty in identifying regeneration times in discrete-event simulations. We focus on a very simple example of a discrete-event simulation, and explore its regenerative properties. We show that for our example, it is possible to explicitly determine regeneration times. The ideas that are used to establish this fact might prove useful in identifying regeneration times in more general discrete-event system simulations.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimating ambulance requirements in Auckland, New Zealand.\n \n \n \n \n\n\n \n Henderson, S.; and Mason, A. J.\n\n\n \n\n\n\n In Farrington, P.; Black Nembhard, H.; Sturrock, D.; and Evans, G., editor(s), Proceedings of the 1999 Winter Simulation Conference, pages 1670–1674, Piscataway NJ, 1999. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Estimating paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{henmas99,\n\tabstract = {The St. John Ambulance Service (Auckland Region) in New Zealand (St Johns) supplies ambulance services to Crown Health Enterprises. Current and future contracts specify several minimum performance targets that St Johns must achieve. As the city of Auckland grows, roads become congested, and population demographics change. These changes mean that St. Johns faces a very difficult problem: how many ambulances are needed, and where should they be placed in order to meet the service targets efficiently.\n\nA preliminary study using queueing theory established that more ambulances were needed and suggested placements. However, the assumptions required in the queueing model were such that a more realistic modelling approach was deemed necessary to verify and refine the queueing model results. We discuss a simulation and analysis software tool BartSim developed by the authors, that is currently being used to address the issues St. Johns faces. The results obtained from BartSim are used to drive changes to the rostering process for staff at St. Johns.},\n\taddress = {Piscataway NJ},\n\tannote = {pubs/EstimatingAmb99.pdf},\n\tauthor = {Henderson, S.~G. and A. J. Mason},\n\tbooktitle = {Proceedings of the 1999 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2018-07-16 17:40:46 +0000},\n\teditor = {Farrington, P.~A. and Black Nembhard, H. and Sturrock, D.~T. and Evans, G.~W.},\n\torganization = {IEEE},\n\tpages = {1670--1674},\n\ttitle = {Estimating ambulance requirements in {A}uckland, {N}ew {Z}ealand},\n\turl_paper = {https://dl.acm.org/authorize?N654072},\n\tyear = 1999}\n\n
\n
\n\n\n
\n The St. John Ambulance Service (Auckland Region) in New Zealand (St Johns) supplies ambulance services to Crown Health Enterprises. Current and future contracts specify several minimum performance targets that St Johns must achieve. As the city of Auckland grows, roads become congested, and population demographics change. These changes mean that St. Johns faces a very difficult problem: how many ambulances are needed, and where should they be placed in order to meet the service targets efficiently. A preliminary study using queueing theory established that more ambulances were needed and suggested placements. However, the assumptions required in the queueing model were such that a more realistic modelling approach was deemed necessary to verify and refine the queueing model results. We discuss a simulation and analysis software tool BartSim developed by the authors, that is currently being used to address the issues St. Johns faces. The results obtained from BartSim are used to drive changes to the rostering process for staff at St. Johns.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1998\n \n \n (3)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Heuristics in Rostering for Call Centres.\n \n \n \n \n\n\n \n Henderson, S. G.; Mason, A. J.; Ziedins, I.; Thomson, R.; Burgess, D.; and Seabrook, T.\n\n\n \n\n\n\n In Proceedings of the 33rd Conference of the Operational Research Society of New Zealand, pages 253–261, 1998. \n \n\n\n\n
\n\n\n\n \n \n \"Heuristics paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{henetal98,\n\tabstract = {An important new feature on the business scene is the development of call centres, whereby a pool of staff is used to answer incoming calls from customers. This project develops a model that enables staffing levels to be determined to meet specified quality targets on customer wait times. Unlike most previous work, this new model explicitly considers call arrival rates that vary during the day, and exploits linkage between periods to keep staffing costs to a minimum.},\n\tauthor = {S. G. Henderson and A. J. Mason and I. Ziedins and R. Thomson and D. Burgess and T. Seabrook},\n\tbooktitle = {Proceedings of the 33rd Conference of the Operational Research Society of New Zealand},\n\tdate-added = {2016-01-10 19:28:22 +0000},\n\tdate-modified = {2016-01-10 19:29:31 +0000},\n\tpages = {253--261},\n\ttitle = {Heuristics in Rostering for Call Centres},\n\turl_paper = {pubs/ros2.pdf},\n\tyear = {1998}}\n\n
\n
\n\n\n
\n An important new feature on the business scene is the development of call centres, whereby a pool of staff is used to answer incoming calls from customers. This project develops a model that enables staffing levels to be determined to meet specified quality targets on customer wait times. Unlike most previous work, this new model explicitly considers call arrival rates that vary during the day, and exploits linkage between periods to keep staffing costs to a minimum.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Estimation of stationary densities for Markov chains.\n \n \n \n \n\n\n \n Glynn, P.; and Henderson, S.\n\n\n \n\n\n\n In Medeiros, D.; Watson, E.; Carson, J. S.; and Manivannan, M. S., editor(s), Proceedings of the 1998 Winter Simulation Conference, pages 647–652, Piscataway, NJ, 1998. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Estimation paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{glyhen98,\n\tabstract = {We describe a new estimator of the stationary density of a Markov chain on general state space. The new estimator is easier to compute, converges faster, and empirically gives visually superior estimates than more standard estimators such as nearest-neighbour or kernel density estimators.\n},\n\taddress = {Piscataway, NJ},\n\tauthor = {P.~W. Glynn and S.~G. Henderson},\n\tbooktitle = {Proceedings of the 1998 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {D. Medeiros and E. Watson and J. S. Carson and M. S. Manivannan},\n\torganization = {IEEE},\n\tpages = {647--652},\n\ttitle = {Estimation of stationary densities for {M}arkov chains},\n\turl_paper = {pubs/dens.pdf},\n\tyear = 1998}\n\n
\n
\n\n\n
\n We describe a new estimator of the stationary density of a Markov chain on general state space. The new estimator is easier to compute, converges faster, and empirically gives visually superior estimates than more standard estimators such as nearest-neighbour or kernel density estimators. \n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Rostering by iterating integer programming and simulation.\n \n \n \n \n\n\n \n Henderson, S. G.; and Mason, A. J.\n\n\n \n\n\n\n In Medeiros, D.; Watson, E.; Carson, J. S.; and Manivannan, M. S., editor(s), Proceedings of the 1998 Winter Simulation Conference, pages 677–684, Piscataway, NJ, 1998. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Rostering paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{henmas98,\n\tabstract = {We present a new technique (RIIPS) for solving rostering problems in the presence of service uncertainty. RIIPS stands for ``Rostering by Iterating Integer Programming and Simulation''. RIIPS allows great complexity of the stochastic system being rostered. This modelling freedom comes at a price, as the approach can be extremely computationally intensive. Therefore any reduction in computational effort using, for example, efficiency improvement techniques, is of great interest. We specify several ways in which these may be applied.},\n\taddress = {Piscataway, NJ},\n\tauthor = {S. G. Henderson and A. J. Mason},\n\tbooktitle = {Proceedings of the 1998 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\teditor = {D. Medeiros and E. Watson and J. S. Carson and M. S. Manivannan},\n\torganization = {IEEE},\n\tpages = {677--684},\n\ttitle = {Rostering by iterating integer programming and simulation},\n\turl_paper = {pubs/riips.pdf},\n\tyear = 1998}\n\n
\n
\n\n\n
\n We present a new technique (RIIPS) for solving rostering problems in the presence of service uncertainty. RIIPS stands for ``Rostering by Iterating Integer Programming and Simulation''. RIIPS allows great complexity of the stochastic system being rostered. This modelling freedom comes at a price, as the approach can be extremely computationally intensive. Therefore any reduction in computational effort using, for example, efficiency improvement techniques, is of great interest. We specify several ways in which these may be applied.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1997\n \n \n (4)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n Improving quality in introductory industrial engineering through case studies and communication.\n \n \n \n\n\n \n Birge, J. R.; Henderson, S. G.; and Olsen, L.\n\n\n \n\n\n\n In ASEE Annual Conference Proceedings, 1997. \n \n\n\n\n
\n\n\n\n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{birhenols97,\n\tauthor = {J. R. Birge and S. G. Henderson and L. Olsen},\n\tbooktitle = {ASEE Annual Conference Proceedings},\n\tdate-added = {2016-01-10 19:30:59 +0000},\n\tdate-modified = {2016-01-10 19:30:59 +0000},\n\ttitle = {Improving quality in introductory industrial engineering through case studies and communication},\n\tyear = {1997}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Efficient simulation of multiclass queueing networks.\n \n \n \n \n\n\n \n Henderson, S.; and Meyn, S.\n\n\n \n\n\n\n In Andradóottir, S.; Healy, K.; Withers, D.; and Nelson, B., editor(s), Proceedings of the 1997 Winter Simulation Conference, pages 216–223, Piscataway NJ, 1997. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Efficient paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{henmey97,\n\tabstract = {General multiclass queueing systems are extremely difficult to analyze. A great deal of effort has been devoted to examining the question of stability and performance of such networks. However, the simulation of multiclass queueing networks as a tool for performance evaluation has received little attention.\nWe generate useful control variates for the steady-state simulation of multiclass queueing networks with Markovian structure. The resulting variance reductions greatly outweigh the cost of solving a minimization problem prior to the simulation, as evidenced through numerical examples.},\n\taddress = {Piscataway NJ},\n\tauthor = {S.~G.\\ Henderson and S.~P.\\ Meyn},\n\tbooktitle = {Proceedings of the 1997 Winter Simulation Conference},\n\tdate-added = {2016-01-10 17:47:03 +0000},\n\tdate-modified = {2016-01-10 17:47:03 +0000},\n\teditor = {S.\\ Andrad\\'{o}ottir and K.~J.\\ Healy and D.~H.\\ Withers and B.~L.\\ Nelson},\n\torganization = {IEEE},\n\tpages = {216--223},\n\ttitle = {Efficient simulation of multiclass queueing networks},\n\turl_paper = {pubs/reentrant.pdf},\n\tyear = 1997}\n\n
\n
\n\n\n
\n General multiclass queueing systems are extremely difficult to analyze. A great deal of effort has been devoted to examining the question of stability and performance of such networks. However, the simulation of multiclass queueing networks as a tool for performance evaluation has received little attention. We generate useful control variates for the steady-state simulation of multiclass queueing networks with Markovian structure. The resulting variance reductions greatly outweigh the cost of solving a minimization problem prior to the simulation, as evidenced through numerical examples.\n
\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Variance Reduction Via an Approximating Markov Process.\n \n \n \n \n\n\n \n Henderson, S. G.\n\n\n \n\n\n\n Ph.D. Thesis, Department of Operations Research, Stanford University, Stanford, CA, 1997.\n \n\n\n\n
\n\n\n\n \n \n \"Variance paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n\n \n  \n \n 2 downloads\n \n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@phdthesis{hen97,\n\taddress = {Stanford, CA},\n\tauthor = {Shane G.\\ Henderson},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 16:07:54 +0000},\n\tschool = {Department of Operations Research, Stanford University},\n\ttitle = {Variance Reduction Via an Approximating Markov Process},\n\turl_paper = {pubs/thesis.pdf},\n\tyear = {1997}}\n\n
\n
\n\n\n\n
\n\n\n
\n \n\n \n \n \n \n \n \n Computational efficiency in output analysis.\n \n \n \n \n\n\n \n Damerdji, H.; Henderson, S. G.; and Glynn, P. W.\n\n\n \n\n\n\n In Andradóottir, S.; Healy, K.; Withers, D.; and Nelson, B., editor(s), Proceedings of the 1997 Winter Simulation Conference, pages 208–215, Piscataway NJ, 1997. IEEE\n \n\n\n\n
\n\n\n\n \n \n \"Computational paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@inproceedings{dametal97,\n\tabstract = {A central quantity in steady-state simulation is the time-average variance constant.􏰅 Estimates of this quantity are needed (for example) for constructing confidence intervals, and several estimators have been proposed, including nonoverlapping and overlapping batch means methods, spectral methods, and the regenerative method. The asymptotic statistical properties of these estimators have been investigated but the computational complexity involved in computing them has received very little attention.\n\t\nWe assume a fixed simulation run-length, as opposed to sequential methods in which the run-length is determined dynamically. In order to consistently estimate the time-average variance constant, all of the estimators require an amount of computation that is linear in the time-horizon simulated, with the exception of spectral methods which require a superlinear amount of computation.\n},\n\taddress = {Piscataway NJ},\n\tauthor = {H. Damerdji and S. G. Henderson and P. W. Glynn},\n\tbooktitle = {Proceedings of the 1997 Winter Simulation Conference},\n\tdate-added = {2016-01-10 16:07:54 +0000},\n\tdate-modified = {2016-01-10 17:48:08 +0000},\n\teditor = {S.\\ Andrad\\'{o}ottir and K.~J.\\ Healy and D.~H.\\ Withers and B.~L.\\ Nelson},\n\torganization = {IEEE},\n\tpages = {208--215},\n\ttitle = {Computational efficiency in output analysis},\n\turl_paper = {pubs/compeff.pdf},\n\tyear = 1997}\n\n
\n
\n\n\n
\n A central quantity in steady-state simulation is the time-average variance constant.􏰅 Estimates of this quantity are needed (for example) for constructing confidence intervals, and several estimators have been proposed, including nonoverlapping and overlapping batch means methods, spectral methods, and the regenerative method. The asymptotic statistical properties of these estimators have been investigated but the computational complexity involved in computing them has received very little attention. We assume a fixed simulation run-length, as opposed to sequential methods in which the run-length is determined dynamically. In order to consistently estimate the time-average variance constant, all of the estimators require an amount of computation that is linear in the time-horizon simulated, with the exception of spectral methods which require a superlinear amount of computation. \n
\n\n\n
\n\n\n\n\n\n
\n
\n\n
\n
\n  \n 1995\n \n \n (1)\n \n \n
\n
\n \n \n
\n \n\n \n \n \n \n \n \n Derandomizing and rerandomizing variance estimators.\n \n \n \n \n\n\n \n Henderson, S. G.; and Glynn, P. W.\n\n\n \n\n\n\n Technical Report Dept. of Operations Research, Stanford University, Stanford, CA, 1995.\n \n\n\n\n
\n\n\n\n \n \n \"Derandomizing paper\n  \n \n\n \n\n \n link\n  \n \n\n bibtex\n \n\n \n  \n \n abstract \n \n\n \n\n \n \n \n \n \n \n \n\n  \n \n \n\n\n\n
\n
@techreport{hengly95,\n\tabstract = {This technical report is meant to accompany the paper [7] and should be read in conjunction with that work􏰁. It describes several concepts that were alluded to in [7] but not elaborated on. We give algorithms for computing the derandomized estimator, introduce the concept of rerandomization, examine heuristically the question of how the derandomized and standard variance estimators behave as the splitting constants get small, and consider the use of the derandomized estimator in sequential stopping situations􏰁.},\n\taddress = {Stanford, CA},\n\tauthor = {Shane G. Henderson and Peter W. Glynn},\n\tdate-added = {2016-01-10 19:44:19 +0000},\n\tdate-modified = {2016-01-10 19:45:31 +0000},\n\tinstitution = {Dept. of Operations Research, Stanford University},\n\ttitle = {Derandomizing and rerandomizing variance estimators},\n\turl_paper = {pubs/derandrerand.pdf},\n\tyear = {1995}}\n\n
\n
\n\n\n
\n This technical report is meant to accompany the paper [7] and should be read in conjunction with that work􏰁. It describes several concepts that were alluded to in [7] but not elaborated on. We give algorithms for computing the derandomized estimator, introduce the concept of rerandomization, examine heuristically the question of how the derandomized and standard variance estimators behave as the splitting constants get small, and consider the use of the derandomized estimator in sequential stopping situations􏰁.\n
\n\n\n
\n\n\n\n\n\n
\n
\n\n\n\n\n
\n\n\n \n\n \n \n \n \n\n
\n"}; document.write(bibbase_data.data);