@inproceedings{4436, abstract = {We present an assume-guarantee interface algebra for real-time components. In our formalism a component implements a set of task sequences that share a resource. A component interface consists of an arrival rate function and a latency for each task sequence, and a capacity function for the shared resource. The interface specifies that the component guarantees certain task latencies depending on assumptions about task arrival rates and allocated resource capacities. Our algebra defines compatibility and refinement relations on interfaces. Interface compatibility can be checked on partial designs, even when some component interfaces are yet unknown. In this case interface composition computes as new assumptions the weakest constraints on the unknown components that are necessary to satisfy the specified guarantees. Interface refinement is defined in a way that ensures that compatible interfaces can be refined and implemented independently. Our algebra thus formalizes an interface-based design methodology that supports both the incremental addition of new components and the independent stepwise refinement of existing components. We demonstrate the flexibility and efficiency of the framework through simulation experiments.}, author = {Thomas Henzinger and Matic, Slobodan}, pages = {253 -- 266}, publisher = {IEEE}, title = {{An interface algebra for real-time components}}, doi = {10.1109/RTAS.2006.11}, year = {2006}, } @inproceedings{4432, abstract = {We add freeze quantifiers to the game logic ATL in order to specify real-time objectives for games played on timed structures. We define the semantics of the resulting logic TATL by restricting the players to physically meaningful strategies, which do not prevent time from diverging. We show that TATL can be model checked over timed automaton games. We also specify timed optimization problems for physically meaningful strategies, and we show that for timed automaton games, the optimal answers can be approximated to within any degree of precision.}, author = {Thomas Henzinger and Prabhu, Vinayak S}, pages = {1 -- 17}, publisher = {Springer}, title = {{Timed alternating-time temporal logic}}, doi = {10.1007/11867340_1}, volume = {4202}, year = {2006}, } @inproceedings{4431, abstract = {We summarize some current trends in embedded systems design and point out some of their characteristics, such as the chasm between analytical and computational models, and the gap between safety-critical and best-effort engineering practices. We call for a coherent scientific foundation for embedded systems design, and we discuss a few key demands on such a foundation: the need for encompassing several manifestations of heterogeneity, and the need for constructivity in design. We believe that the development of a satisfactory Embedded Systems Design Science provides a timely challenge and opportunity for reinvigorating computer science.}, author = {Thomas Henzinger and Sifakis, Joseph}, pages = {1 -- 15}, publisher = {Springer}, title = {{The embedded systems design challenge}}, doi = {10.1007/11813040_1}, volume = {4085}, year = {2006}, } @article{4451, abstract = {One source of complexity in the μ-calculus is its ability to specify an unbounded number of switches between universal (AX) and existential (EX) branching modes. We therefore study the problems of satisfiability, validity, model checking, and implication for the universal and existential fragments of the μ-calculus, in which only one branching mode is allowed. 
The universal fragment is rich enough to express most specifications of interest, and therefore improved algorithms are of practical importance. We show that while the satisfiability and validity problems become indeed simpler for the existential and universal fragments, this is, unfortunately, not the case for model checking and implication. We also show the corresponding results for the alternation-free fragment of the μ-calculus, where no alternations between least and greatest fixed points are allowed. Our results imply that efforts to find a polynomial-time model-checking algorithm for the μ-calculus can be replaced by efforts to find such an algorithm for the universal or existential fragment.}, author = {Thomas Henzinger and Kupferman, Orna and Majumdar, Rupak}, journal = {Theoretical Computer Science}, number = {2}, pages = {173 -- 186}, publisher = {Elsevier}, title = {{On the universal and existential fragments of the mu-calculus}}, doi = {10.1016/j.tcs.2005.11.015}, volume = {354}, year = {2006}, } @inproceedings{4523, abstract = {We consider the problem of whether a given program satisfies a specified safety property. Interesting programs have infinite state spaces, with inputs ranging over infinite domains, and for these programs the property checking problem is undecidable. Two broad approaches to property checking are testing and verification. Testing tries to find inputs and executions which demonstrate violations of the property. Verification tries to construct a formal proof which shows that all executions of the program satisfy the property. Testing works best when errors are easy to find, but it is often difficult to achieve sufficient coverage for correct programs. On the other hand, verification methods are most successful when proofs are easy to find, but they are often inefficient at discovering errors. We propose a new algorithm, Synergy, which combines testing and verification. Synergy unifies several ideas from the literature, including counterexample-guided model checking, directed testing, and partition refinement. This paper presents a description of the Synergy algorithm, its theoretical properties, a comparison with related algorithms, and a prototype implementation called Yogi.}, author = {Gulavani, Bhargav S and Thomas Henzinger and Kannan, Yamini and Nori, Aditya V and Rajamani, Sriram K}, pages = {117 -- 127}, publisher = {ACM}, title = {{Synergy: A new algorithm for property checking}}, doi = {10.1145/1181775.1181790}, year = {2006}, } @inproceedings{4526, abstract = {We designed and implemented a new programming language called Hierarchical Timing Language (HTL) for hard real-time systems. Critical timing constraints are specified within the language, and ensured by the compiler. Programs in HTL are extensible in two dimensions without changing their timing behavior: new program modules can be added, and individual program tasks can be refined. The mechanism supporting time invariance under parallel composition is that different program modules communicate at specified instances of time. Time invariance under refinement is achieved by conservative scheduling of the top level. HTL is a coordination language, in that individual tasks can be implemented in "foreign" languages.
As a case study, we present a distributed HTL implementation of an automotive steer-by-wire controller.}, author = {Ghosal, Arkadeb and Thomas Henzinger and Iercan, Daniel and Kirsch, Christoph M and Sangiovanni-Vincentelli, Alberto}, pages = {132 -- 141}, publisher = {ACM}, title = {{A hierarchical coordination language for interacting real-time tasks}}, doi = {10.1145/1176887.1176907}, year = {2006}, } @inproceedings{4528, abstract = {Computational modeling of biological systems is becoming increasingly common as scientists attempt to understand biological phenomena in their full complexity. Here we distinguish between two types of biological models - mathematical and computational - according to their different representations of biological phenomena and their diverse potential. We call the approach of constructing computational models of biological systems executable biology, as it focuses on the design of executable computer algorithms that mimic biological phenomena. We give an overview of the main modeling efforts in this direction, and discuss some of the new challenges that executable biology poses for computer science and biology. We argue that for executable biology to reach its full potential as a mainstream biological technique, formal and algorithmic approaches must be integrated into biological research, driving biology towards a more precise engineering discipline.}, author = {Fisher, Jasmin and Thomas Henzinger}, pages = {1675 -- 1682}, publisher = {IEEE}, title = {{Executable biology}}, doi = {10.1109/WSC.2006.322942}, year = {2006}, } @inproceedings{4539, abstract = {Games on graphs with ω-regular objectives provide a model for the control and synthesis of reactive systems. Every ω-regular objective can be decomposed into a safety part and a liveness part. The liveness part ensures that something good happens “eventually.” Two main strengths of the classical, infinite-limit formulation of liveness are robustness (independence from the granularity of transitions) and simplicity (abstraction of complicated time bounds). However, the classical liveness formulation suffers from the drawback that the time until something good happens may be unbounded. A stronger formulation of liveness, so-called finitary liveness, overcomes this drawback, while still retaining robustness and simplicity. Finitary liveness requires that there exists an unknown, fixed bound b such that something good happens within b transitions. While for one-shot liveness (reachability) objectives, classical and finitary liveness coincide, for repeated liveness (Büchi) objectives, the finitary formulation is strictly stronger. In this work we study games with finitary parity and Streett (fairness) objectives. We prove the determinacy of these games, present algorithms for solving these games, and characterize the memory requirements of winning strategies. Our algorithms can be used, for example, for synthesizing controllers that do not let the response time of a system increase without bound.}, author = {Krishnendu Chatterjee and Thomas Henzinger}, pages = {257 -- 271}, publisher = {Springer}, title = {{Finitary winning in omega-regular games}}, doi = {10.1007/11691372_17}, volume = {3920}, year = {2006}, } @inproceedings{4538, abstract = {A stochastic graph game is played by two players on a game graph with probabilistic transitions. We consider stochastic graph games with ω-regular winning conditions specified as parity objectives. These games lie in NP ∩ coNP.
We present a strategy improvement algorithm for stochastic parity games; this is the first non-brute-force algorithm for solving these games. From the strategy improvement algorithm we obtain a randomized subexponential-time algorithm to solve such games.}, author = {Krishnendu Chatterjee and Thomas Henzinger}, pages = {512 -- 523}, publisher = {Springer}, title = {{Strategy improvement and randomized subexponential algorithms for stochastic parity games}}, doi = {10.1007/11672142_42}, volume = {3884}, year = {2006}, } @inproceedings{4551, abstract = {We consider Markov decision processes (MDPs) with multiple discounted reward objectives. Such MDPs occur in design problems where one wishes to simultaneously optimize several criteria, for example, latency and power. The possible trade-offs between the different objectives are characterized by the Pareto curve. We show that every Pareto-optimal point can be achieved by a memoryless strategy; however, unlike in the single-objective case, the memoryless strategy may require randomization. Moreover, we show that the Pareto curve can be approximated in polynomial time in the size of the MDP. Additionally, we study the problem of whether a given value vector is realizable by any strategy, and show that it can be decided in polynomial time; but the question of whether it is realizable by a deterministic memoryless strategy is NP-complete. These results provide efficient algorithms for design exploration in MDP models with multiple objectives.}, author = {Krishnendu Chatterjee and Majumdar, Rupak and Thomas Henzinger}, pages = {325 -- 336}, publisher = {Springer}, title = {{Markov decision processes with multiple objectives}}, doi = {10.1007/11672142_26}, volume = {3884}, year = {2006}, } @article{4550, abstract = {In 2-player non-zero-sum games, Nash equilibria capture the options for rational behavior if each player attempts to maximize her payoff. In contrast to classical game theory, we consider lexicographic objectives: first, each player tries to maximize her own payoff, and then, the player tries to minimize the opponent's payoff. Such objectives arise naturally in the verification of systems with multiple components. There, instead of proving that each component satisfies its specification no matter how the other components behave, it sometimes suffices to prove that each component satisfies its specification provided that the other components satisfy their specifications. We say that a Nash equilibrium is secure if it is an equilibrium with respect to the lexicographic objectives of both players. We prove that in graph games with Borel winning conditions, which include the games that arise in verification, there may be several Nash equilibria, but there is always a unique maximal payoff profile of a secure equilibrium.
We show how this equilibrium can be computed in the case of ω-regular winning conditions, and we characterize the memory requirements of strategies that achieve the equilibrium.}, author = {Krishnendu Chatterjee and Thomas Henzinger and Jurdziński, Marcin}, journal = {Theoretical Computer Science}, number = {1-2}, pages = {67 -- 82}, publisher = {Elsevier}, title = {{Games with secure equilibria}}, doi = {10.1016/j.tcs.2006.07.032}, volume = {365}, year = {2006}, } @inproceedings{4549, abstract = {We present a compositional theory of system verification, where specifications assign real-numbered costs to systems. These costs can express a wide variety of quantitative system properties, such as resource consumption, price, or a measure of how well a system satisfies its specification. The theory supports the composition of systems and specifications, and the hiding of variables. Boolean refinement relations are replaced by real-numbered distances between descriptions of a system at different levels of detail. We show that the classical Boolean rules for compositional reasoning have quantitative counterparts in our setting. While our general theory allows costs to be specified by arbitrary cost functions, we also consider a class of linear cost functions, which give rise to an instance of our framework where all operations are computable in polynomial time.}, author = {Krishnendu Chatterjee and de Alfaro, Luca and Faella, Marco and Thomas Henzinger and Majumdar, Rupak and Stoelinga, Mariëlle}, pages = {179 -- 188}, publisher = {IEEE}, title = {{Compositional quantitative reasoning}}, doi = {10.1109/QEST.2006.11}, year = {2006}, } @inproceedings{4552, abstract = {A concurrent reachability game is a two-player game played on a graph: at each state, the players simultaneously and independently select moves; the two moves jointly determine a probability distribution over the successor states. The objective for player 1 consists in reaching a set of target states; the objective for player 2 is to prevent this, so that the game is zero-sum. Our contributions are two-fold. First, we present a simple proof of the fact that in concurrent reachability games, for all epsilon > 0, memoryless epsilon-optimal strategies exist. A memoryless strategy is independent of the history of plays, and an epsilon-optimal strategy achieves the objective with probability within epsilon of the value of the game. In contrast to previous proofs of this fact, which rely on the limit behavior of discounted games using advanced Puiseux series analysis, our proof is elementary and combinatorial. Second, we present a strategy-improvement (a.k.a. policy-iteration) algorithm for concurrent games with reachability objectives.}, author = {Krishnendu Chatterjee and de Alfaro, Luca and Thomas Henzinger}, pages = {291 -- 300}, publisher = {IEEE}, title = {{Strategy improvement for concurrent reachability games}}, doi = {10.1109/QEST.2006.48}, year = {2006}, } @inproceedings{4574, abstract = {Many software model checkers are based on predicate abstraction. If the verification goal depends on pointer structures, the approach does not work well, because it is difficult to find adequate predicate abstractions for the heap. In contrast, shape analysis, which uses graph-based heap abstractions, can provide a compact representation of recursive data structures. We integrate shape analysis into the software model checker Blast. Because shape analysis is expensive, we do not apply it globally.
Instead, we ensure that, like predicates, shape graphs are computed and stored locally, only where necessary for proving the verification goal. To achieve this, we extend lazy abstraction refinement, which so far has been used only for predicate abstractions, to three-valued logical structures. This approach not only increases the precision of model checking, but it also increases the efficiency of shape analysis. We implemented the technique by extending Blast with calls to Tvla.}, author = {Beyer, Dirk and Thomas Henzinger and Théoduloz, Grégory}, pages = {532 -- 546}, publisher = {Springer}, title = {{Lazy shape analysis}}, doi = {10.1007/11817963_48}, volume = {4144}, year = {2006}, } @unpublished{573, abstract = {Mitchison and Jozsa recently suggested that the "chained-Zeno" counterfactual computation protocol proposed by Hosten et al. is counterfactual for only one output of the computer. This claim was based on the existing abstract algebraic definition of counterfactual computation, and indeed according to this definition, their argument is correct. However, a more general (physically adequate) definition of counterfactual computation is implicitly assumed by Hosten et al. Here we explain in detail why the protocol is counterfactual and how the "history tracking" method of the existing description inadequately represents the physics underlying the protocol. Consequently, we propose a modified definition of counterfactual computation. Finally, we comment on one of the most interesting aspects of the error-correcting protocol.}, author = {Hosten, Onur and Rakher, Matthew and Barreiro, Julio and Peters, Nicholas and Kwiat, Paul}, pages = {12}, publisher = {ArXiv}, title = {{Counterfactual computation revisited}}, year = {2006}, } @unpublished{574, abstract = {Vaidman, in a recent article, adopts the method of 'quantum weak measurements in pre- and postselected ensembles' to ascertain whether or not the chained-Zeno counterfactual computation scheme proposed by Hosten et al. is counterfactual, a question that has been the topic of a debate on the definition of counterfactuality. We disagree with his conclusion, which brings up some interesting aspects of quantum weak measurements and some concerns about the way they are interpreted.}, author = {Hosten, Onur and Kwiat, Paul}, pages = {2}, publisher = {ArXiv}, title = {{Weak measurements and counterfactual computation}}, year = {2006}, } @inproceedings{578, abstract = {A source of single photons allows secure quantum key distribution, in addition to being a critical resource for linear optics quantum computing. We describe our progress on deterministically creating single photons from spontaneous parametric downconversion, an extension of the Pittman, Jacobs and Franson scheme [Phys. Rev. A 66, 042303 (2002)]. Their idea was to conditionally prepare single photons by measuring one member of a spontaneously emitted photon pair and storing the remaining conditionally prepared photon until a predetermined time, when it would be "deterministically" released from storage. Our approach attempts to improve upon this by recycling the pump pulse in order to decrease the possibility of multiple-pair generation, while maintaining a high probability of producing a single pair.
Many of the challenges we discuss are central to other quantum information technologies, including the need for low-loss optical storage, switching and detection, and fast feed-forward control.}, author = {Peters, Nicholas A and Arnold, Keith J and VanDevender, Aaron P and Jeffrey, Evan R and Rangarajan, Radhika and Onur Hosten and Barreiro, Julio T and Altepeter, Joseph B and Kwiat, Paul G}, publisher = {SPIE}, title = {{Towards a quasi-deterministic single-photon source}}, doi = {10.1117/12.684702}, volume = {6305}, year = {2006}, } @inproceedings{577, abstract = {Visible light photon counters (VLPCs) and solid-state photomultipliers (SSPMs) are high-efficiency single-photon detectors which have multi-photon counting capability. While both the VLPCs and the SSPMs have inferred internal quantum efficiencies above 93%, the actual measured values for both detectors were in fact limited to less than 88%, attributed to in-coupling losses. We are currently improving this overall detection efficiency via a) custom anti-reflection coating of the detectors and the in-coupling fibers, b) implementing a novel cryogenic design to reduce transmission losses, and c) using low-noise electronics to obtain a better signal-to-noise ratio.}, author = {Rangarajan, Radhika and Altepeter, Joseph B and Jeffrey, Evan R and Stoutimore, Micah J and Peters, Nicholas A and Onur Hosten and Kwiat, Paul G}, publisher = {SPIE}, title = {{High-efficiency single-photon detectors}}, doi = {10.1117/12.686117}, volume = {6372}, year = {2006}, } @article{579, abstract = {The logic underlying the coherent nature of quantum information processing often deviates from intuitive reasoning, leading to surprising effects. Counterfactual computation constitutes a striking example: the potential outcome of a quantum computation can be inferred, even if the computer is not run [1]. Relying on arguments similar to those for interaction-free measurements [2] (or quantum interrogation [3]), counterfactual computation is accomplished by putting the computer in a superposition of 'running' and 'not running' states, and then interfering the two histories. Conditional on the as-yet-unknown outcome of the computation, it is sometimes possible to counterfactually infer information about the solution. Here we demonstrate counterfactual computation, implementing Grover's search algorithm with an all-optical approach [4]. It was believed that the overall probability of such counterfactual inference is intrinsically limited [1,5], so that it could not perform better on average than random guesses. However, using a novel 'chained' version of the quantum Zeno effect [6], we show how to boost the counterfactual inference probability to unity, thereby beating the random guessing limit. Our methods are general and apply to any physical system, as illustrated by a discussion of trapped-ion systems. Finally, we briefly show that, in certain circumstances, counterfactual computation can eliminate errors induced by decoherence.}, author = {Onur Hosten and Rakher, Matthew T and Barreiro, Julio T and Peters, Nicholas A and Kwiat, Paul G}, journal = {Nature}, number = {7079}, pages = {949 -- 952}, publisher = {Nature Publishing Group}, title = {{Counterfactual quantum computation through quantum interrogation}}, doi = {10.1038/nature04523}, volume = {439}, year = {2006}, } @inproceedings{583, abstract = {Visible light photon counters (VLPCs) and solid-state photomultipliers (SSPMs) facilitate efficient single-photon detection.
We are attempting to improve their efficiency, previously limited to < 88% by coupling losses, via anti-reflection coatings, better electronics and cryogenics.}, author = {Rangarajan, Radhika and Peters, Nicholas A and Onur Hosten and Altepeter, Joseph B and Jeffrey, Evan R and Kwiat, Paul G}, publisher = {IEEE}, title = {{Improved single-photon detection}}, doi = {10.1109/CLEO.2006.4628641}, year = {2006}, } @article{6151, author = {Salecker, Iris and Häusser, Michael and de Bono, Mario}, issn = {1469-221X}, journal = {EMBO reports}, number = {6}, pages = {585--589}, publisher = {Wiley}, title = {{On the axonal road to circuit function and behaviour: Workshop on the assembly and function of neuronal circuits}}, doi = {10.1038/sj.embor.7400713}, volume = {7}, year = {2006}, } @article{6152, author = {Rogers, Candida and Persson, Annelie and Cheung, Benny and de Bono, Mario}, issn = {0960-9822}, journal = {Current Biology}, number = {7}, pages = {649--659}, publisher = {Elsevier}, title = {{Behavioral motifs and neural pathways coordinating O2 responses and aggregation in C. elegans}}, doi = {10.1016/j.cub.2006.03.023}, volume = {16}, year = {2006}, } @article{7328, abstract = {An experimental technique for measuring the current density distribution with a resolution smaller than the channel/rib scale of the flow field in polymer electrolyte fuel cells (PEFCs) is presented. The electron conductors in a plane perpendicular to the channel direction are considered as two-dimensional resistors. Hence, the current density is obtained from the solution of Laplace's equation with the potentials at current collector and reaction layer as boundary conditions. Using the ohmic drop for calculating the local current, detailed knowledge of all resistances involved is of prime importance. In particular, the contact resistance between the gas diffusion layer (GDL) and flow field rib, as well as the GDL bulk conductivity, are strongly dependent on clamping pressure. They represent a substantial amount of the total ohmic drop and therefore require careful consideration. The detailed experimental setup as well as the concise procedure for quantitative data evaluation is described. Finally, the method is applied successfully to a cell operated on pure oxygen and air up to high current densities. The results show that electrical and ionic resistances seem to govern the current distribution at low current regimes, whereas mass transport limitations locally hamper the current production at high loads.}, author = {Freunberger, Stefan Alexander and Reum, Mathias and Evertz, Jörg and Wokaun, Alexander and Büchi, Felix N.}, issn = {0013-4651}, journal = {Journal of The Electrochemical Society}, number = {11}, publisher = {The Electrochemical Society}, title = {{Measuring the current distribution in PEFCs with sub-millimeter resolution}}, doi = {10.1149/1.2345591}, volume = {153}, year = {2006}, } @article{7327, abstract = {Propagation of performance changes to adjacent cells in polymer electrolyte fuel cell stacks is studied by means of voltage monitoring and local current density measurements in peripheral cells of the stack. A technical fuel cell stack has been modified by implementing two independent reactant and coolant supplies in order to deliberately change the performance of one cell (anomalous cell) and study the coupling phenomena to adjacent cells (coupling cells), while keeping the working conditions of the latter cell-group unaltered.
Two anomalies in a single anomalous cell of the stack are studied: (i) air starvation and (ii) a thermal anomaly, together with their coupling to adjacent cells. The results have shown that anomalies inducing considerable changes in the local current density of the anomalous cell (such as air starvation) propagate to adjacent cells, affecting their performance. The propagation of local current density changes takes place via the common bipolar plate due to its finite thickness and in-plane conductivity. Consequently, anomalies which do not strongly influence the local current density distribution (such as a thermal anomaly under the studied working conditions) do not propagate to adjacent cells.}, author = {Santis, Marco and Freunberger, Stefan Alexander and Papra, Matthias and Wokaun, Alexander and Büchi, Felix N.}, issn = {0378-7753}, journal = {Journal of Power Sources}, number = {2}, pages = {1076--1083}, publisher = {Elsevier}, title = {{Experimental investigation of coupling phenomena in polymer electrolyte fuel cell stacks}}, doi = {10.1016/j.jpowsour.2006.06.007}, volume = {161}, year = {2006}, } @inproceedings{7326, abstract = {Often the properties of a single cell are considered as representative for a complete polymer electrolyte fuel cell stack or even a fuel cell system. In some cases this comes close; however, in many real cases differences on several scales become important. Cell interaction phenomena in fuel cell stacks that arise from inequalities between adjacent cells are investigated in detail experimentally. For that, a specialized 2-cell stack with advanced localized diagnostics was developed. The results show that inequalities propagate by electrical coupling, inhomogeneous cell polarization, and induced in-plane currents in the common bipolar plate. The effects of the different loss mechanisms are analyzed and quantified.}, author = {Büchi, Felix N. and Freunberger, Stefan Alexander and Santis, Marco}, booktitle = {ECS Transactions}, location = {Cancun, Mexico}, number = {1}, pages = {963--968}, publisher = {ECS}, title = {{What is learned beyond the scale of single cells?}}, doi = {10.1149/1.2356215}, volume = {3}, year = {2006}, } @article{7329, abstract = {A novel measurement principle for measuring the current distribution in polymer electrolyte fuel cells (PEFCs) is introduced. In contrast to all other known techniques, it for the first time achieves a resolution smaller than the channel/rib scale of the flow field in PEFCs. The current density is obtained by considering the electron conductors in the cell as a two-dimensional resistor with the voltage drop caused by the current. The method was applied to a cell operated on oxygen up to high current densities.
The results show that the ohmic resistances govern the current distribution in the low current regime, whereas mass transport limitations hamper the current production under the land at high loads.}, author = {Freunberger, Stefan Alexander and Reum, Mathias and Wokaun, Alexander and Büchi, Felix N.}, issn = {1388-2481}, journal = {Electrochemistry Communications}, number = {9}, pages = {1435--1438}, publisher = {Elsevier}, title = {{Expanding current distribution measurement in PEFCs to sub-millimeter resolution}}, doi = {10.1016/j.elecom.2006.05.032}, volume = {8}, year = {2006}, } @article{7330, abstract = {Polymer electrolyte fuel cells (PE fuel cells) working with air at low stoichiometries (<2.0) and standard electrochemical components show a high degree of inhomogeneity in the current density distribution over the active area. An inhomogeneous current density distribution leads to a non-uniform utilization of the active area, which could negatively affect the lifetime of the cells. Furthermore, it is believed to lower cell performance. In this work, the homogenization of the current density, realized by means of tailored cathodes with catalyst loadings redistributed along the air channel, is investigated. The air stoichiometry range for which a homogenization of the current density is achieved depends upon the gradient with which the catalyst is redistributed along the air channel. A gently increasing catalyst loading profile homogenizes the current density at relatively higher air stoichiometries, while a steeper profile is better suited to lower air stoichiometries. The results show that a homogenization of the current density by means of redistributed catalyst loading has negative effects on cell performance. Model calculations corroborate the experimental findings on homogenization of the current density and deliver an explanation for the decrease in cell performance.}, author = {Santis, M. and Freunberger, Stefan Alexander and Reiner, A. and Büchi, F.N.}, issn = {0013-4686}, journal = {Electrochimica Acta}, number = {25}, pages = {5383--5393}, publisher = {Elsevier}, title = {{Homogenization of the current density in polymer electrolyte fuel cells by in-plane cathode catalyst gradients}}, doi = {10.1016/j.electacta.2006.02.008}, volume = {51}, year = {2006}, } @article{7332, abstract = {A quasi-two-dimensional, along-the-channel mass and heat-transfer model for a proton exchange membrane fuel cell (PEFC) is described and validated against experimental current distribution data. The model is formulated in a dimensional manner, i.e., local transport phenomena are treated one-dimensionally in the through-plane direction and coupled in-plane by convective transport in the gas and coolant channels. Thus, a two-dimensional slice running through the repetitive unit of a cell from the anode channel via membrane-electrode assembly (MEA) and cathode channel to the coolant channel, and from inlet to outlet, is modeled. The aim of the work is to elucidate the influence of operating conditions such as feed gas humidities and stoichiometric ratios on the along-the-channel current density distribution and to identify the distinct underlying voltage loss mechanisms. Furthermore, a complicated technical flow field is modeled by a combination of co- and counterflow subdomains and compared with experimental current densities.}, author = {Freunberger, Stefan Alexander and Santis, Marco and Schneider, Ingo A.
and Wokaun, Alexander and Büchi, Felix N.}, issn = {0013-4651}, journal = {Journal of The Electrochemical Society}, number = {2}, publisher = {The Electrochemical Society}, title = {{In-plane effects in large-scale PEMFCs}}, doi = {10.1149/1.2150150}, volume = {153}, year = {2006}, } @article{7331, abstract = {A previously developed mathematical model for water management and current density distribution in polymer electrolyte fuel cells (PEFCs) is employed to investigate the effects of cooling strategies on cell performance. The model describes a two-dimensional slice through the cell along the channels and through the entire cell sandwich, including the coolant channels and the bipolar plate. Arbitrary flow arrangements of fuel, oxidant, and coolant stream directions can be described. Due to the serious impact of temperature on all processes in the PEFC, both the direction of the coolant stream relative to the gas streams and its mass flow turn out to significantly affect the cell performance. Besides influencing the electrochemical reaction and all kinds of mass transfer, temperature variations predominantly alter the local membrane hydration distribution and subsequently its conductivity.}, author = {Freunberger, Stefan Alexander and Wokaun, Alexander and Büchi, Felix N.}, issn = {0013-4651}, journal = {Journal of The Electrochemical Society}, number = {5}, publisher = {The Electrochemical Society}, title = {{In-plane effects in large-scale PEFCs: II. The influence of cooling strategy on cell performance}}, doi = {10.1149/1.2185282}, volume = {153}, year = {2006}, } @article{854, abstract = {Phylogenetic relationships between the extinct woolly mammoth (Mammuthus primigenius) and the Asian (Elephas maximus) and African savanna (Loxodonta africana) elephants remain unresolved. Here, we report the sequence of the complete mitochondrial genome (16,842 base pairs) of a woolly mammoth extracted from permafrost-preserved remains from the Pleistocene epoch - the oldest mitochondrial genome sequence determined to date. We demonstrate that well-preserved mitochondrial genome fragments, as long as ∼1,600-1,700 base pairs, can be retrieved from pre-Holocene remains of an extinct species. Phylogenetic reconstruction of the Elephantinae clade suggests that M. primigenius and E. maximus are sister species that diverged soon after their common ancestor split from the L. africana lineage. Low nucleotide diversity found between independently determined mitochondrial genomic sequences of woolly mammoths separated geographically and in time suggests that north-eastern Siberia was occupied by a relatively homogeneous population of M. primigenius throughout the late Pleistocene.}, author = {Rogaev, Evgeny I and Moliaka, Yuri K and Malyarchuk, Boris A and Fyodor Kondrashov and Derenko, Miroslava V and Chumakov, Ilya M and Grigorenko, Anastasia P}, journal = {PLoS Biology}, number = {3}, pages = {0403 -- 0410}, publisher = {Public Library of Science}, title = {{Complete mitochondrial genome and phylogeny of Pleistocene mammoth Mammuthus primigenius}}, doi = {10.1371/journal.pbio.0040073}, volume = {4}, year = {2006}, } @article{868, abstract = {Background: The glyoxylate cycle is thought to be present in bacteria, protists, plants, fungi, and nematodes, but not in other Metazoa. However, activity of the glyoxylate cycle enzymes, malate synthase (MS) and isocitrate lyase (ICL), in animal tissues has been reported.
In order to clarify the status of the MS and ICL genes in animals and gain insight into their evolution, we undertook a comparative-genomic study. Results: Using sequence similarity searches, we identified MS genes in arthropods, echinoderms, and vertebrates, including platypus and opossum, but not in the numerous sequenced genomes of placental mammals. The regions of the placental mammals' genomes expected to code for malate synthase, as determined by comparison of the gene orders in vertebrate genomes, show clear similarity to the opossum MS sequence but contain stop codons, indicating that the MS gene became a pseudogene in placental mammals. By contrast, the ICL gene is undetectable in animals other than the nematodes that possess a bifunctional, fused ICL-MS gene. Examination of phylogenetic trees of MS and ICL suggests multiple horizontal gene transfer events that probably went in both directions between several bacterial and eukaryotic lineages. The strongest evidence was obtained for the acquisition of the bifunctional ICL-MS gene, with the corresponding operonic organization, from an as yet unknown bacterial source by the common ancestor of the nematodes. Conclusion: The distribution of the MS and ICL genes in animals suggests that either they encode alternative enzymes of the glyoxylate cycle that are not orthologous to the known MS and ICL, or the animal MS acquired a new function that remains to be characterized. Regardless of the ultimate solution to this conundrum, the genes for the glyoxylate cycle enzymes present a remarkable variety of evolutionary events, including unusual horizontal gene transfer from bacteria to animals.}, author = {Fyodor Kondrashov and Koonin, Eugene V and Morgunov, Igor G and Finogenova, Tatiana V and Kondrashova, Marie N}, journal = {Biology Direct}, publisher = {BioMed Central}, title = {{Evolution of glyoxylate cycle enzymes in Metazoa: Evidence of multiple horizontal transfer events and pseudogene formation}}, doi = {10.1186/1745-6150-1-31}, volume = {1}, year = {2006}, } @article{873, abstract = {New genes commonly appear through complete or partial duplications of pre-existing genes. Duplications of long DNA segments are constantly produced by rare mutations, may become fixed in a population by selection or random drift, and are subject to divergent evolution of the paralogous sequences after fixation, although gene conversion can impede this process. New data shed some light on each of these processes. Mutations which involve duplications can occur through at least two different mechanisms: backward strand slippage during DNA replication and unequal crossing-over. The background rate of duplication of a complete gene in humans is 10^-9 to 10^-10 per generation, although many genes located within hot-spots of large-scale mutation are duplicated much more often. Many gene duplications affect fitness strongly, and are responsible, through gene dosage effects, for a number of genetic diseases. However, high levels of intrapopulation polymorphism caused by presence or absence of long, gene-containing DNA segments imply that some duplications are not under strong selection. The polymorphism to fixation ratios appear to be approximately the same for gene duplications and for presumably selectively neutral nucleotide substitutions, which, according to the McDonald-Kreitman test, is consistent with selective neutrality of duplications.
However, this pattern can also be due to negative selection against most segregating duplications and positive selection for at least some duplications which become fixed. Patterns in post-fixation evolution of duplicated genes do not easily reveal the causes of fixations. Many gene duplications which became fixed recently in a variety of organisms were positively selected because the increased expression of the corresponding genes was beneficial. The effects of gene dosage provide a unified framework for studying all phases of the life history of a gene duplication. Application of well-known methods of evolutionary genetics to accumulating data on new, polymorphic, and fixed duplications will enhance our understanding of the role of natural selection in the evolution by gene duplication.}, author = {Fyodor Kondrashov and Kondrashov, Alexey S}, journal = {Journal of Theoretical Biology}, number = {2}, pages = {141 -- 151}, publisher = {Elsevier}, title = {{Role of selection in fixation of gene duplications}}, doi = {10.1016/j.jtbi.2005.08.033}, volume = {239}, year = {2006}, } @article{8489, abstract = {Structure elucidation of proteins by either NMR or X-ray crystallography often requires the screening of a large number of samples for promising protein constructs and optimum solution conditions. For large-scale screening of protein samples in solution, robust methods are needed that allow a rapid assessment of the folding of a polypeptide under diverse sample conditions. Here we present HET-SOFAST NMR, a highly sensitive new method for semi-quantitative characterization of the structural compactness and heterogeneity of polypeptide chains in solution. On the basis of one-dimensional 1H HET-SOFAST NMR data, obtained on well-folded, molten globular, partially- and completely unfolded proteins, we define empirical thresholds that can be used as quantitative benchmarks for protein compactness. For 15N-enriched protein samples, two-dimensional 1H-15N HET-SOFAST correlation spectra provide site-specific information about the structural heterogeneity along the polypeptide chain.}, author = {Schanda, Paul and Forge, Vincent and Brutscher, Bernhard}, issn = {0749-1581}, journal = {Magnetic Resonance in Chemistry}, number = {S1}, pages = {S177--S184}, publisher = {Wiley}, title = {{HET-SOFAST NMR for fast detection of structural compactness and heterogeneity along polypeptide chains}}, doi = {10.1002/mrc.1825}, volume = {44}, year = {2006}, } @article{8488, abstract = {We demonstrate for different protein samples that three-dimensional HNCO and HNCA correlation spectra may be recorded in a few minutes' acquisition time using the band-selective excitation short-transient sequences presented here. This opens new perspectives for the NMR structural investigation of unstable protein samples and real-time site-resolved studies of protein kinetics.}, author = {Schanda, Paul and Van Melckebeke, Hélène and Brutscher, Bernhard}, issn = {0002-7863}, journal = {Journal of the American Chemical Society}, keywords = {Colloid and Surface Chemistry, Biochemistry, General Chemistry, Catalysis}, number = {28}, pages = {9042--9043}, publisher = {American Chemical Society}, title = {{Speeding up three-dimensional protein NMR experiments to a few minutes}}, doi = {10.1021/ja062025p}, volume = {128}, year = {2006}, } @article{8490, abstract = {We demonstrate the feasibility of recording 1H–15N correlation spectra of proteins in only one second of acquisition time.
The experiment combines recently proposed SOFAST-HMQC with Hadamard-type 15N frequency encoding. This allows site-resolved real-time NMR studies of kinetic processes in proteins with an increased time resolution. The sensitivity of the experiment is sufficient to be applicable to a wide range of molecular systems available at millimolar concentration on a high magnetic field spectrometer.}, author = {Schanda, Paul and Brutscher, Bernhard}, issn = {1090-7807}, journal = {Journal of Magnetic Resonance}, keywords = {Nuclear and High Energy Physics, Biophysics, Biochemistry, Condensed Matter Physics}, number = {2}, pages = {334--339}, publisher = {Elsevier}, title = {{Hadamard frequency-encoded SOFAST-HMQC for ultrafast two-dimensional protein NMR}}, doi = {10.1016/j.jmr.2005.10.007}, volume = {178}, year = {2006}, } @article{8514, abstract = {We study the extent to which the Hausdorff dimension of a compact subset of an infinite-dimensional Banach space is affected by a typical mapping into a finite-dimensional space. It is possible that the dimension drops under all such mappings, but the amount by which it typically drops is controlled by the ‘thickness exponent’ of the set, which was defined by Hunt and Kaloshin (Nonlinearity 12 (1999), 1263–1275). More precisely, let $X$ be a compact subset of a Banach space $B$ with thickness exponent $\tau$ and Hausdorff dimension $d$. Let $M$ be any subspace of the (locally) Lipschitz functions from $B$ to $\mathbb{R}^{m}$ that contains the space of bounded linear functions. We prove that for almost every (in the sense of prevalence) function $f \in M$, the Hausdorff dimension of $f(X)$ is at least $\min\{ m, d / (1 + \tau) \}$. We also prove an analogous result for a certain part of the dimension spectra of Borel probability measures supported on $X$. The factor $1 / (1 + \tau)$ can be improved to $1 / (1 + \tau / 2)$ if $B$ is a Hilbert space. Since dimension cannot increase under a (locally) Lipschitz function, these theorems become dimension preservation results when $\tau = 0$. We conjecture that many of the attractors associated with the evolution equations of mathematical physics have thickness exponent zero. We also discuss the sharpness of our results in the case $\tau > 0$.}, author = {Ott, William and Hunt, Brian and Kaloshin, Vadim}, issn = {0143-3857}, journal = {Ergodic Theory and Dynamical Systems}, number = {3}, pages = {869--891}, publisher = {Cambridge University Press}, title = {{The effect of projections on fractal sets and measures in Banach spaces}}, doi = {10.1017/s0143385705000714}, volume = {26}, year = {2006}, } @inproceedings{8515, abstract = {We consider the evolution of a set carried by a space periodic incompressible stochastic flow in a Euclidean space. We report on three main results obtained in [8, 9, 10] concerning long time behaviour for a typical realization of the stochastic flow. First, at time t most of the particles are at a distance of order √t away from the origin. Moreover, we prove a Central Limit Theorem for the evolution of a measure carried by the flow, which holds for almost every realization of the flow. Second, we show the existence of a zero measure full Hausdorff dimension set of points, which escape to infinity at a linear rate. Third, in the 2-dimensional case, we study the set of points visited by the original set by time t. Such a set, when scaled down by the factor of t, has a limiting non-random shape.}, author = {Kaloshin, Vadim and Dolgopyat, D.
and Koralov, L.}, booktitle = {XIVth International Congress on Mathematical Physics}, isbn = {9789812562012}, location = {Lisbon, Portugal}, pages = {290--295}, publisher = {World Scientific}, title = {{Long time behaviour of periodic stochastic flows}}, doi = {10.1142/9789812704016_0026}, year = {2006}, } @article{8513, author = {Kaloshin, Vadim and Saprykina, Maria}, issn = {1553-5231}, journal = {Discrete & Continuous Dynamical Systems - A}, number = {2}, pages = {611--640}, publisher = {American Institute of Mathematical Sciences (AIMS)}, title = {{Generic 3-dimensional volume-preserving diffeomorphisms with superexponential growth of number of periodic orbits}}, doi = {10.3934/dcds.2006.15.611}, volume = {15}, year = {2006}, } @article{869, abstract = {The impact of synonymous nucleotide substitutions on fitness in mammals remains controversial. Despite some indications of selective constraint, synonymous sites are often assumed to be neutral, and the rate of their evolution is used as a proxy for mutation rate. We subdivide all sites into four classes in terms of the mutable CpG context: nonCpG, postC, preG, and postCpreG, and compare four-fold synonymous sites and intron sites residing outside transposable elements. The distribution of the rate of evolution across all synonymous sites is trimodal. Rate of evolution at nonCpG synonymous sites, not preceded by C and not followed by G, is ∼10% below that at such intron sites. In contrast, rate of evolution at postCpreG synonymous sites is ∼30% above that at such intron sites. Finally, synonymous and intron postC and preG sites evolve at similar rates. The relationship between the levels of polymorphism at the corresponding synonymous and intron sites is very similar to that between their rates of evolution. Within every class, synonymous sites are occupied by G or C much more often than intron sites, whose nucleotide composition is consistent with neutral mutation-drift equilibrium. These patterns suggest that synonymous sites are under weak selection in favor of G and C, with the average coefficient s∼0.25/Ne∼10^-5, where Ne is the effective population size. Such selection decelerates evolution and reduces variability at sites with symmetric mutation, but has the opposite effects at sites where the favored nucleotides are more mutable. The amino-acid composition of proteins dictates that many synonymous sites are CpG-prone, which causes them, on average, to evolve faster and to be more polymorphic than intron sites. An average genotype carries ∼10^7 suboptimal nucleotides at synonymous sites, implying synergistic epistasis in selection against them.}, author = {Fyodor Kondrashov and Ogurtsov, Aleksey Yu and Kondrashov, Alexey S}, journal = {Journal of Theoretical Biology}, number = {4}, pages = {616 -- 626}, publisher = {Elsevier}, title = {{Selection in favor of nucleotides G and C diversifies evolution rates and levels of polymorphism at mammalian synonymous sites}}, doi = {10.1016/j.jtbi.2005.10.020}, volume = {240}, year = {2006}, } @article{903, abstract = {Background: Carcinogenesis typically involves multiple somatic mutations in caretaker (DNA repair) and gatekeeper (tumor suppressors and oncogenes) genes. Analysis of mutation spectra of the tumor suppressor that is most commonly mutated in human cancers, p53, unexpectedly suggested that somatic evolution of the p53 gene during tumorigenesis is dominated by positive selection for gain of function.
This conclusion is supported by accumulating experimental evidence of evolution of new functions of p53 in tumors. These findings prompted a genome-wide analysis of possible positive selection during tumor evolution. Methods: A comprehensive analysis of probable somatic mutations in the sequences of Expressed Sequence Tags (ESTs) from malignant tumors and normal tissues was performed in order to assess the prevalence of positive selection in cancer evolution. For each EST, the numbers of synonymous and non-synonymous substitutions were calculated. In order to identify genes with a signature of positive selection in cancers, these numbers were compared to: i) expected numbers and ii) the numbers for the respective genes in the ESTs from normal tissues. Results: We identified 112 genes with a signature of positive selection in tumors, i.e., a significantly elevated ratio of non-synonymous to synonymous substitutions, as compared to 37 such genes in an approximately equal-sized EST collection from normal tissues. A substantial fraction of the tumor-specific positive-selection candidates have experimentally demonstrated or strongly predicted links to cancer. Conclusion: The results of EST analysis should be interpreted with extreme caution given the noise introduced by sequencing errors and undetected polymorphisms. Furthermore, an inherent limitation of EST analysis is that multiple mutations amenable to statistical analysis can be detected only in relatively highly expressed genes. Nevertheless, the present results suggest that positive selection might affect a substantial number of genes during tumorigenic somatic evolution.}, author = {Babenko, Vladimir N and Basu, Malay K and Fyodor Kondrashov and Rogozin, Igor B and Koonin, Eugene V}, journal = {BMC Cancer}, publisher = {BioMed Central}, title = {{Signs of positive selection of somatic mutations in human cancers detected by EST sequence analysis}}, doi = {10.1186/1471-2407-6-36}, volume = {6}, year = {2006}, } @article{9505, abstract = {Cytosine methylation, a common form of DNA modification that antagonizes transcription, is found at transposons and repeats in vertebrates, plants and fungi. Here we have mapped DNA methylation in the entire Arabidopsis thaliana genome at high resolution. DNA methylation covers transposons and is present within a large fraction of A. thaliana genes. Methylation within genes is conspicuously biased away from gene ends, suggesting a dependence on RNA polymerase transit. Genic methylation is strongly influenced by transcription: moderately transcribed genes are most likely to be methylated, whereas genes at either extreme are least likely. In turn, transcription is influenced by methylation: short methylated genes are poorly expressed, and loss of methylation in the body of a gene leads to enhanced transcription. Our results indicate that genic transcription and DNA methylation are closely interwoven processes.}, author = {Zilberman, Daniel and Gehring, Mary and Tran, Robert K.
and Ballinger, Tracy and Henikoff, Steven}, issn = {1546-1718}, journal = {Nature Genetics}, number = {1}, pages = {61--69}, publisher = {Nature Publishing Group}, title = {{Genome-wide analysis of Arabidopsis thaliana DNA methylation uncovers an interdependence between methylation and transcription}}, doi = {10.1038/ng1929}, volume = {39}, year = {2006}, } @article{13430, abstract = {Dynamic self-assembly (DySA) processes occurring outside of thermodynamic equilibrium underlie many forms of adaptive and intelligent behaviors in natural systems. Relatively little, however, is known about the principles that govern DySA and the ways in which it can be extended to artificial ensembles. This article discusses recent advances in both the theory and the practice of nonequilibrium self-assembly. It is argued that a union of ideas from thermodynamics and dynamical systems theory can provide a general description of DySA. In parallel, heuristic design rules can be used to construct DySA systems of increasing complexities based on a variety of suitable interactions/potentials on length scales from nanoscopic to macroscopic. Applications of these rules to magnetohydrodynamic DySA are also discussed.}, author = {Fialkowski, Marcin and Bishop, Kyle J. M. and Klajn, Rafal and Smoukov, Stoyan K. and Campbell, Christopher J. and Grzybowski, Bartosz A.}, issn = {1520-6106}, journal = {The Journal of Physical Chemistry B}, keywords = {Materials Chemistry, Surfaces, Coatings and Films, Physical and Theoretical Chemistry}, number = {6}, pages = {2482--2496}, publisher = {American Chemical Society}, title = {{Principles and implementations of dissipative (dynamic) self-assembly}}, doi = {10.1021/jp054153q}, volume = {110}, year = {2006}, } @article{13428, abstract = {Mixtures of oppositely charged nanoparticles of various sizes and charge ratios precipitate only at the point of electroneutrality. This phenomenon - specific to the nanoscale and reminiscent of threshold precipitation of ions - is a consequence of the formation of core-and-shell nanoparticle aggregates, in which the shells are composed of like-charged particles and are stabilized by efficient electrostatic screening.}, author = {Kalsin, Alexander M. and Kowalczyk, Bartlomiej and Smoukov, Stoyan K. and Klajn, Rafal and Grzybowski, Bartosz A.}, issn = {1520-5126}, journal = {Journal of the American Chemical Society}, keywords = {Colloid and Surface Chemistry, Biochemistry, General Chemistry, Catalysis}, number = {47}, pages = {15046--15047}, publisher = {American Chemical Society}, title = {{Ionic-like behavior of oppositely charged nanoparticles}}, doi = {10.1021/ja0642966}, volume = {128}, year = {2006}, } @article{13429, abstract = {The fruitful core: Organic syntheses reported in the literature from 1850 to 2004 are analyzed with mathematical tools from network theory and statistical physics. There is a set of substances (the core) from which the majority of other organic compounds can be made (see picture; red: core, blue: periphery, green: islands). Search algorithms are used to identify small optimal sets of maximally useful chemicals.}, author = {Bishop, Kyle J. M. and Klajn, Rafal and Grzybowski, Bartosz A.}, issn = {1521-3773}, journal = {Angewandte Chemie International Edition}, keywords = {General Chemistry, Catalysis}, number = {32}, pages = {5348--5354}, publisher = {Wiley}, title = {{The core and most useful molecules in organic chemistry}}, doi = {10.1002/anie.200600881}, volume = {45}, year = {2006}, }
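Note (plain-text comment; BibTeX ignores text outside entries): entries 7328 and 7329 above recover sub-millimeter current density distributions by treating the cell's electron conductors as a two-dimensional resistor and solving Laplace's equation for the potential, with the current collector and the reaction layer as boundary conditions. The Python sketch below illustrates only that principle on a toy geometry; the grid size, conductivity, layer thickness, boundary potentials, and the single rib/channel split are hypothetical placeholders, not values taken from the papers.

import numpy as np

# Hypothetical geometry and material values, chosen only for illustration.
NX, NY = 60, 40                 # grid points across the width / through the thickness
V_RIB, V_REACTION = 0.0, 0.05   # placeholder boundary potentials (V)
SIGMA = 300.0                   # placeholder effective conductivity (S/m)
THICKNESS = 0.3e-3              # placeholder layer thickness (m)
H = THICKNESS / (NY - 1)        # grid spacing (m)

def solve_laplace(tol=1e-7, max_iter=200_000):
    """Jacobi iteration for Laplace's equation on a rectangular grid.

    Dirichlet boundaries: the reaction layer (bottom edge) and the rib
    contact (left half of the top edge); all other edges are insulating
    (zero normal gradient), mimicking the gas channel and side walls.
    """
    phi = np.full((NY, NX), 0.5 * (V_RIB + V_REACTION))
    for _ in range(max_iter):
        old = phi.copy()
        # five-point stencil update of the interior points
        phi[1:-1, 1:-1] = 0.25 * (old[:-2, 1:-1] + old[2:, 1:-1] +
                                  old[1:-1, :-2] + old[1:-1, 2:])
        phi[-1, :] = V_REACTION               # reaction layer: fixed potential
        phi[0, :NX // 2] = V_RIB              # rib contact: fixed potential
        phi[0, NX // 2:] = phi[1, NX // 2:]   # gas channel: insulating
        phi[1:-1, 0] = phi[1:-1, 1]           # insulating side walls
        phi[1:-1, -1] = phi[1:-1, -2]
        if np.max(np.abs(phi - old)) < tol:
            break
    return phi

phi = solve_laplace()
# Magnitude of the through-plane current density at the reaction layer,
# from Ohm's law in differential form: j = sigma * |dphi/dy|.
j = SIGMA * np.abs(phi[-1, :] - phi[-2, :]) / H
print(np.round(j / 1e4, 2))  # A/cm^2 profile across the rib/channel width

With these placeholder boundaries the computed profile is largest under the rib contact and decays under the channel, qualitatively reproducing the sub-rib/channel variation that the two papers resolve experimentally; the measured setups additionally account for contact resistances and clamping-pressure-dependent conductivities, which this sketch deliberately omits.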