/** * Copyright (c) 2015, Alexander Orzechowski. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ /** * Currently in beta stage. Changes can and will be made to the core mechanic * making this not backwards compatible. * * Github: https://github.com/Need4Speed402/tessellator */ Tessellator.Model.prototype.drawObject = Tessellator.Model.prototype.add;
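The last line above aliases `drawObject` to `add` on the model prototype. A minimal sketch of that aliasing pattern follows, using a stand-in `Model` class rather than the real Tessellator API (which this example does not assume):

```javascript
// Stand-in class; not the actual Tessellator.Model.
function Model() {
    this.objects = [];
}

Model.prototype.add = function (obj) {
    this.objects.push(obj);
    return this;
};

// Aliasing: both names now refer to the same function object,
// and `this` still resolves to the instance at call time.
Model.prototype.drawObject = Model.prototype.add;

var m = new Model();
m.drawObject("cube");                 // identical to m.add("cube")
console.log(m.add === m.drawObject);  // true
```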
'use strict'; function NavalMap(canvasId, imageMapUrl, imageCompassUrl, config) { this.canvas = document.getElementById(canvasId); this.imageMap = new Image(); this.imageCompass = new Image(); this.config = config; this.itemsLoaded = false; this.nationsLoaded = false; this.shopsLoaded = false; this.portsLoaded = false; this.imageMapLoaded = false; this.imageCompassLoaded = false; this.init(imageMapUrl, imageCompassUrl); } NavalMap.prototype.init = function init(imageMapUrl, imageCompassUrl) { var self = this; this.loadEverything(imageMapUrl, imageCompassUrl, function () { var stage = new createjs.Stage(self.canvas); createjs.Touch.enable(stage); stage.enableMouseOver(5); stage.tickEnabled = false; //createjs.Ticker.framerate = 60; createjs.Ticker.timingMode = createjs.Ticker.RAF; self.map = new Map(self.canvas, stage, self.imageMap, self.imageCompass, self.config); }); }; NavalMap.prototype.loadImageMap = function loadImageMap(url, cb) { this.imageMap.src = url; var self = this; this.imageMap.onload = function () { self.imageMapLoaded = true; if (self.checkEverythingIsLoaded()) { if(cb) { cb(); } } }; }; NavalMap.prototype.loadImageCompass = function loadImageCompass(url, cb) { this.imageCompass.src = url; var self = this; this.imageCompass.onload = function () { self.imageCompassLoaded = true; if (self.checkEverythingIsLoaded()) { if(cb) { cb(); } } }; }; NavalMap.prototype.checkEverythingIsLoaded = function () { return this.itemsLoaded && this.nationsLoaded && this.shopsLoaded && this.portsLoaded && this.imageMapLoaded && this.imageCompassLoaded; }; NavalMap.prototype.loadItems = function(cb) { var self = this; $.getScript("items.php").done(function(){ self.itemsLoaded = true; if (self.checkEverythingIsLoaded()) { if(cb) { cb(); } } }); }; NavalMap.prototype.loadNations = function(cb) { var self = this; $.getScript("nations.php").done(function(){ self.nationsLoaded = true; if (self.checkEverythingIsLoaded()) { if(cb) { cb(); } } }); }; NavalMap.prototype.loadShops = function(cb) { var self = this; $.getScript("shops.php").done(function(){ self.shopsLoaded = true; if (self.checkEverythingIsLoaded()) { if(cb) { cb(); } } }); }; NavalMap.prototype.loadPorts = function(cb) { var self = this; $.getScript("ports.php").done(function(){ self.portsLoaded = true; if (self.checkEverythingIsLoaded()) { if(cb) { cb(); } } }); }; NavalMap.prototype.loadEverything = function loadEverything(urlMap, urlCompass, cb) { this.loadImageMap(urlMap, cb); this.loadImageCompass(urlCompass, cb); this.loadShops(cb); this.loadItems(cb); this.loadPorts(cb); this.loadNations(cb); }; function Map(canvas, stage, imageMap, imageCompass, config) { this.canvas = canvas; this.config = config; this.stage = stage; this.globalContainer = new createjs.Container(); this.mapContainer = new createjs.Container(); this.unmodifiedMapContainer = {}; this.compass = new Compass(imageCompass, config); this.update = false; this.alreadyZooming = false; this.gpsCursor = undefined; this.statistics = {}; this.fpsLabel = new createjs.Text("-- fps", "bold 18px Arial", "black"); this.init(imageMap); } Map.prototype.init = function (imageMap) { this.stage.addChild(this.globalContainer); this.stage.addChild(this.fpsLabel); this.fpsLabel.x = 240; this.fpsLabel.y = 10; this.globalContainer.addChild(this.mapContainer); this.globalContainer.addChild(this.compass); this.mapContainer.addChild(new createjs.Bitmap(imageMap)); this.mapContainer.hasBeenDblClicked = false; this.initContainerMap(); this.resizeCanvas(this); this.createAllEvents(); var self =
this; Nations.Nations.forEach(function(nation) { self.statistics[nation.Name] = 0; }); this.addPorts(); this.stage.update(); self.tickEvent(); setTimeout(function() { $("#progress-bar-load").hide(); $(".top-nav").removeClass('hide'); $("#port-information").removeClass('hide'); $("#how-to-use").removeClass('hide'); },600); //this.update = true; }; Map.prototype.initContainerMap = function () { this.setScale(this.config.map.scale); this.centerTo(this.config.map.x, this.config.map.y); var self = this; this.mapContainer.addLine = function (x, y) { var shape = new createjs.Shape(); self.mapContainer.lineIndex = self.mapContainer.children.length; self.mapContainer.addChild(shape); shape.graphics.setStrokeStyle(3, "round").beginStroke('#3d3d3d').moveTo((self.compass.x - self.mapContainer.x) / self.mapContainer.scale, (self.compass.y - self.mapContainer.y) / self.mapContainer.scale).lineTo(x, y); }; this.mapContainer.removeLine = function () { if (self.mapContainer.lineIndex) { self.mapContainer.removeChildAt(self.mapContainer.lineIndex); } }; //this.globalContainer.cursor = "default"; }; Map.prototype.populateStatistics = function () { var stats = $("#ports-number"); $.each(this.statistics, function(name, number) { stats.append('<strong>' + name + ' : </strong>' + number + '<br>'); }) }; Map.prototype.setScale = function (scale) { this.mapContainer.scale = this.mapContainer.scaleX = this.mapContainer.scaleY = scale; }; Map.prototype.zoom = function (increment) { this.setScale(this.mapContainer.scale + increment); }; Map.prototype.addPorts = function () { var self = this; setTimeout(function() { Ports.forEach(function (port, idx) { var circle = new createjs.Shape(); circle.graphics.beginFill(self.config.color[port.Nation]).drawCircle(0, 0, 5); circle.x = (port.sourcePosition.x + self.config.portsOffset.x) * self.config.portsOffset.ratio; circle.y = (port.sourcePosition.y + self.config.portsOffset.y) * self.config.portsOffset.ratio; circle.cursor = "pointer"; circle.idx = idx; self.statistics[getNationFromIdx(port.Nation).Name] += 1; circle.on("click", function () { var currPort = Ports[this.idx]; $('#port-title').text(currPort.Name); $('#nation').text(getNationFromIdx(currPort.Nation).Name); var timer = currPort.ConquestFlagTimeSlot + 'h - ' + (currPort.ConquestFlagTimeSlot + 2) + "h"; $('#timer').text(currPort.ConquestFlagTimeSlot == -1?'No Timer':timer); $('#capital').text(currPort.Capital?'yes':'no'); $('#regional').text(currPort.Regional?'yes':'no'); $('#shallow').text(currPort.Depth == 1?'yes':'no'); $('#capturer').text(currPort.Capturer); var produces = Shops[this.idx].ResourcesProduced; var consumes = Shops[this.idx].ResourcesConsumed; $('#produces-list').html(''); $('#consumes-list').html(''); produces.forEach(function (produce) { var item = getItemTemplateFromId(produce.Key); $('#produces-list').append('<li class="list-group-item">'+item.Name+' : '+ produce.Value+'</li>'); }); consumes.forEach(function (consume) { var item = getItemTemplateFromId(consume.Key); $('#consumes-list').append('<li class="list-group-item">'+item.Name+' : '+ consume.Value+'</li>'); }); }); circle.cache(-5, -5, 10, 10); self.mapContainer.addChild(circle); }); self.update = true; self.stage.tick(); self.populateStatistics(); },200); }; Map.prototype.keepMapUnderPos = function (x, y) { var mapPos = this.getMapPosFromWindowPos(x, y); this.globalContainer.x = x - this.mapContainer.scale * mapPos.x; this.globalContainer.y = y - this.mapContainer.scale * mapPos.y; }; Map.prototype.keepCompassUnderCurrentPos = function 
() { var mapPos = this.getMapPosFromWindowPos(this.compass.x + this.unmodifiedMapContainer.x, this.compass.y + this.unmodifiedMapContainer.y); this.compass.x = mapPos.x * this.mapContainer.scale; this.compass.y = mapPos.y * this.mapContainer.scale; }; Map.prototype.centerTo = function (x, y) { this.globalContainer.x = this.canvas.width / 2 - this.mapContainer.scale * x; this.globalContainer.y = this.canvas.height / 2 - this.mapContainer.scale * y; }; Map.prototype.getNewWindowPosFromMapPos = function (x, y) { return { x: x * this.mapContainer.scale + this.mapContainer.x - this.globalContainer.x, y: y * this.mapContainer.scale + this.mapContainer.y - this.globalContainer.y } }; Map.prototype.getMapPosFromGpsPos = function(x , y) { return { x: Math.round(x * this.config.gps.ratio + this.config.gps.x), y: Math.round(-(y * this.config.gps.ratio - this.config.gps.y)) } }; Map.prototype.getMapPosFromWindowPos = function (x, y) { return { x: (x - this.unmodifiedMapContainer.x) / this.unmodifiedMapContainer.scale, y: (y - this.unmodifiedMapContainer.y) / this.unmodifiedMapContainer.scale }; }; Map.prototype.gps = function (x, y) { if (this.gpsCursor) { this.mapContainer.removeChild(this.gpsCursor); } this.gpsCursor = new createjs.Shape(); this.gpsCursor.graphics.setStrokeStyle(2).beginStroke("OrangeRed").drawCircle(0,0,30); var mapPos = this.getMapPosFromGpsPos(x, y); this.gpsCursor.x = mapPos.x + (Math.random() > 0.5 ? Math.floor((Math.random() * 10 * 13 / 10)) : - Math.floor((Math.random() * 10 * 13 / 10))); this.gpsCursor.y = mapPos.y + (Math.random() > 0.5 ? Math.floor((Math.random() * 10 * 13 / 10)) : - Math.floor((Math.random() * 10 * 13 / 10))); this.mapContainer.addChild(this.gpsCursor); this.centerTo(mapPos.x, mapPos.y); this.update = true; }; Map.prototype.gpsSubmitEvent = function () { var self = this; $("#gpsForm").submit(function (event) { event.preventDefault(); self.gps($('#xGps').val(), $('#yGps').val()); }); }; Map.prototype.createAllEvents = function () { this.resizeCanvasEvent(); this.gpsSubmitEvent(); this.mouseDownEvent(); this.clickEvent(); this.pressMoveEvent(); //this.pressUpEvent(); this.dblClickEvent(); this.mouseWheelEvent(); }; Map.prototype.dblClickEvent = function () { var self = this; this.globalContainer.on("dblclick", function (evt) { if (this.hasBeenDblClicked) { self.mapContainer.addLine((evt.stageX - self.globalContainer.x) / self.mapContainer.scale, (evt.stageY - self.globalContainer.y) / self.mapContainer.scale); this.hasBeenDblClicked = false; } else { self.mapContainer.removeLine(); self.compass.x = (evt.stageX - self.globalContainer.x); self.compass.y = (evt.stageY - self.globalContainer.y); this.hasBeenDblClicked = true; } self.update = true; }); }; Map.prototype.clickEvent = function () { var self = this; this.globalContainer.on("click", function (evt) { var mapPos = self.getMapPosFromWindowPos(evt.stageX, evt.stageY); var gpsPos = { x: Math.round((mapPos.x - self.config.gps.x) / self.config.gps.ratio), y: Math.round(-(mapPos.y - self.config.gps.y) / self.config.gps.ratio) }; $('#cursorX').text(gpsPos.x); $('#cursorY').text(gpsPos.y); }); }; Map.prototype.mouseDownEvent = function () { this.globalContainer.on("mousedown", function (evt) { this.offset = {x: this.x - evt.stageX, y: this.y - evt.stageY}; //this.cursor = "move"; }); }; Map.prototype.pressMoveEvent = function () { var self = this; this.globalContainer.on("pressmove", function (evt) { this.x = evt.stageX + this.offset.x; this.y = evt.stageY + this.offset.y; //this.cursor = "move"; self.update = 
true; }); }; Map.prototype.pressUpEvent = function () { var self = this; this.globalContainer.on("pressup", function (evt) { this.cursor = "default"; //self.update = true; }); }; Map.prototype.mouseWheelEvent = function () { var self = this; $('#canvas').mousewheel(function (event) { if (!self.alreadyZooming) { self.alreadyZooming = true; setTimeout(function () { self.alreadyZooming = false; }, 45); if (event.deltaY == 1) { if (self.mapContainer.scale < 1.8) { self.zoom(0.1); self.keepMapUnderPos(event.pageX, event.pageY); self.keepCompassUnderCurrentPos(); } } else if (event.deltaY == -1) { if (self.mapContainer.scale > 0.4) { self.zoom(-0.1); self.keepMapUnderPos(event.pageX, event.pageY); self.keepCompassUnderCurrentPos(); } } self.update = true; } }); }; Map.prototype.resizeCanvasEvent = function () { var self = this; window.addEventListener('resize', function(){self.resizeCanvas(self)}, false); }; Map.prototype.resizeCanvas = function (self) { self.canvas.width = window.innerWidth; self.canvas.height = window.innerHeight; self.update = true; }; Map.prototype.tickEvent = function () { var self = this; createjs.Ticker.addEventListener("tick", function (event) { self.fpsLabel.text = Math.round(createjs.Ticker.getMeasuredFPS()) + " fps"; if (self.update) { self.copyMapContainer(); self.update = false; // only update once self.stage.update(event); } }); }; Map.prototype.copyMapContainer = function () { this.unmodifiedMapContainer = { x: this.globalContainer.x, y: this.globalContainer.y, scale: this.mapContainer.scale } }; function Compass(imageCompass, config) { this.addChild(new createjs.Bitmap(imageCompass).setTransform(-imageCompass.width / 2, -imageCompass.height / 2)); this.setScale(config.compass.scale); this.x = config.compass.x; this.y = config.compass.y; } Compass.prototype = new createjs.Container(); Compass.prototype.constructor = Compass; Compass.prototype.setScale = function (scale) { this.scale = this.scaleX = this.scaleY = scale; };
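The `NavalMap` loader above gates startup on six boolean flags, re-checking `checkEverythingIsLoaded()` in every callback. A hedged sketch of the same gate expressed as a countdown; `makeLoadGate` is an illustrative name, not part of the code above:

```javascript
// Countdown gate: each async loader calls done() exactly once, and the
// ready callback fires when the last one completes.
function makeLoadGate(count, onReady) {
    var remaining = count;
    return function done() {
        remaining -= 1;
        if (remaining === 0) {
            onReady();
        }
    };
}

// Six resources (two images, four scripts), one completion callback,
// e.g.: this.imageMap.onload = done;  $.getScript("items.php").done(done);
var done = makeLoadGate(6, function () {
    console.log("everything is loaded");
});
done(); done(); done(); done(); done(); done(); // onReady fires once
```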
using System; using System.Collections.Generic; using System.Linq; using System.Text; using System.Threading.Tasks; using System.Windows; using System.Windows.Controls; using System.Windows.Data; using System.Windows.Documents; using System.Windows.Input; using System.Windows.Media; using System.Windows.Media.Imaging; using System.Windows.Navigation; using System.Windows.Shapes; namespace Gama.Atenciones.Wpf.Views { /// <summary> /// Interaction logic for SearchBoxView.xaml /// </summary> public partial class SearchBoxView : UserControl { public SearchBoxView() { InitializeComponent(); } } }
YUI.add("inputex-inplaceedit", function(Y){ var lang = Y.Lang;//, Event = YAHOO.util.Event, Dom = YAHOO.util.Dom, CSS_PREFIX = 'inputEx-InPlaceEdit-'; /** * Meta field providing in place editing (the editor appears when you click on the formatted value). * @class inputEx.InPlaceEdit * @extends inputEx.Field * @constructor * @param {Object} options Added options: * <ul> * <li>visu</li> * <li>editorField</li> * <li>animColors</li> * </ul> */ inputEx.InPlaceEdit = function(options) { inputEx.InPlaceEdit.superclass.constructor.call(this, options); this.publish('openEditor'); this.publish('closeEditor'); }; lang.extend(inputEx.InPlaceEdit, inputEx.Field, { /** * Set the default values of the options * @param {Object} options Options object as passed to the constructor */ setOptions: function(options) { inputEx.InPlaceEdit.superclass.setOptions.call(this, options); this.options.visu = options.visu; this.options.editorField = options.editorField; //this.options.buttonTypes = options.buttonTypes || {ok:"submit",cancel:"link"}; this.options.buttonConfigs = options.buttonConfigs || [{ type: "submit", value: inputEx.messages.okEditor, className: "inputEx-Button "+CSS_PREFIX+'OkButton', onClick: {fn: this.onOkEditor, scope:this} },{ type: "link", value: inputEx.messages.cancelEditor, className: "inputEx-Button "+CSS_PREFIX+'CancelLink', onClick: {fn: this.onCancelEditor, scope:this} }]; this.options.animColors = options.animColors || null; }, /** * Override renderComponent to create 2 divs: the visualization one, and the edit in place form */ renderComponent: function() { this.renderVisuDiv(); this.renderEditor(); }, /** * Render the editor */ renderEditor: function() { this.editorContainer = inputEx.cn('div', {className: CSS_PREFIX+'editor'}, {display: 'none'}); // Render the editor field this.editorField = inputEx(this.options.editorField,this); var editorFieldEl = this.editorField.getEl(); this.editorContainer.appendChild( editorFieldEl ); Y.one(editorFieldEl).addClass(CSS_PREFIX+'editorDiv'); this.buttons = []; for (var i = 0; i < this.options.buttonConfigs.length ; i++){ var config = this.options.buttonConfigs[i]; config.parentEl = this.editorContainer; this.buttons.push(new inputEx.widget.Button(config)); } // Line breaker () this.editorContainer.appendChild( inputEx.cn('div',null, {clear: 'both'}) ); this.fieldContainer.appendChild(this.editorContainer); }, /** * Set the color when hovering the field * @param {Event} e The original mouseover event */ onVisuMouseOver: function(e) { // to totally disable the visual effect on mouse enter, you should change css options inputEx-InPlaceEdit-visu:hover if(this.disabled) return; if(this.colorAnim) { this.colorAnim.stop(true); } inputEx.sn(this.formattedContainer, null, {backgroundColor: this.options.animColors.from }); }, /** * Start the color animation when hovering the field * @param {Event} e The original mouseout event */ onVisuMouseOut: function(e) { var optionsAnim; if(this.disabled) return; // Start animation if(this.colorAnim) { this.colorAnim.stop(true); } if(!this.options.animColors) return; optionsAnim = { node: Y.one(this.formattedContainer), } if(this.options.animColors.from){ optionsAnim.from = { backgroundColor : this.options.animColors.from } } if(this.options.animColors.from){ optionsAnim.to = { backgroundColor : this.options.animColors.to } } this.colorAnim = new Y.Anim(optionsAnim); this.colorAnim.on("end",function() { Y.one(this.formattedContainer).setStyle('background-color', ''); }); this.colorAnim.run(); }, /** * Create the div 
that will contain the visualization of the value */ renderVisuDiv: function() { this.formattedContainer = inputEx.cn('div', {className: 'inputEx-InPlaceEdit-visu'}); if( lang.isFunction(this.options.formatDom) ) { this.formattedContainer.appendChild( this.options.formatDom(this.options.value) ); } else if( lang.isFunction(this.options.formatValue) ) { this.formattedContainer.innerHTML = this.options.formatValue(this.options.value); } else { this.formattedContainer.innerHTML = lang.isUndefined(this.options.value) ? inputEx.messages.emptyInPlaceEdit: this.options.value; } this.fieldContainer.appendChild(this.formattedContainer); }, /** * Adds the events for the editor and color animations */ initEvents: function() { Y.one(this.formattedContainer).on("click", this.openEditor, this, true); // For color animation (if specified) if (this.options.animColors) { Y.one(this.formattedContainer).on('mouseover', this.onVisuMouseOver, this); Y.one(this.formattedContainer).on('mouseout', this.onVisuMouseOut, this); } if(this.editorField.el) { var that = this; // Register some listeners Y.on("key", function(e){ that.onKeyUp(e); },"#"+Y.one(this.editorField.el).get("id"),"up:"); Y.on("key", function(e){ that.onKeyDown(e); },"#"+Y.one(this.editorField.el).get("id"),"down:" ); } }, /** * Handle some key events to close the editor * @param {Event} e The original keyup event */ onKeyUp: function(e) { // Enter if( e.keyCode == 13) { this.onOkEditor(e); } // Escape if( e.keyCode == 27) { this.onCancelEditor(e); } }, /** * Handle the tabulation key to close the editor * @param {Event} e The original keydown event */ onKeyDown: function(e) { // Tab if(e.keyCode == 9) { this.onOkEditor(e); } }, /** * Validate the editor (ok button, enter key or tabulation key) */ onOkEditor: function(e) { e.halt(); var newValue = this.editorField.getValue(); this.setValue(newValue); this.closeEditor(); var that = this; setTimeout(function() {that.fire("updated",newValue);}, 50); }, /** * Close the editor on cancel (cancel button, blur event or escape key) * @param {Event} e The original event (click, blur or keydown) */ onCancelEditor: function(e) { e.halt(); this.closeEditor(); }, /** * Close the editor and show the formatted value again */ closeEditor: function() { this.editorContainer.style.display = 'none'; this.formattedContainer.style.display = ''; this.fire("closeEditor"); }, /** * Override enable to Enable openEditor */ enable: function(){ this.disabled = false; inputEx.sn(this.formattedContainer, {className: 'inputEx-InPlaceEdit-visu'}); }, /** * Override disable to Disable openEditor */ disable: function(){ this.disabled = true; inputEx.sn(this.formattedContainer, {className: 'inputEx-InPlaceEdit-visu-disable'}); }, /** * Display the editor */ openEditor: function() { if(this.disabled) return; var value = this.getValue(); this.editorContainer.style.display = ''; this.formattedContainer.style.display = 'none'; if(!lang.isUndefined(value)) { this.editorField.setValue(value); } // Set focus in the element ! this.editorField.focus(); // Select the content if(this.editorField.el && lang.isFunction(this.editorField.el.setSelectionRange) && (!!value && !!value.length)) { this.editorField.el.setSelectionRange(0,value.length); } this.fire("openEditor"); }, /** * Return the previously stored value * @return {Any} The value of the subfield */ getValue: function() { var editorOpened = (this.editorContainer.style.display == ''); return editorOpened ?
this.editorField.getValue() : this.value; }, /** * Set the value and update the display * @param {Any} value The value to set * @param {boolean} [sendUpdatedEvt] (optional) Whether this setValue should fire the updatedEvt or not (default is true, pass false to NOT send the event) */ setValue: function(value, sendUpdatedEvt) { // Store the value this.value = value; if(lang.isUndefined(value) || value == "") { inputEx.renderVisu(this.options.visu, inputEx.messages.emptyInPlaceEdit, this.formattedContainer); } else { inputEx.renderVisu(this.options.visu, this.value, this.formattedContainer); } // If the editor is opened, update it if(this.editorContainer.style.display == '') { this.editorField.setValue(value); } inputEx.InPlaceEdit.superclass.setValue.call(this, value, sendUpdatedEvt); }, /** * Close the editor when calling the close function on this field */ close: function() { this.editorContainer.style.display = 'none'; this.formattedContainer.style.display = ''; this.fire("closeEditor"); } }); inputEx.messages.emptyInPlaceEdit = "(click to edit)"; inputEx.messages.cancelEditor = "cancel"; inputEx.messages.okEditor = "Ok"; // Register this class as "inplaceedit" type inputEx.registerType("inplaceedit", inputEx.InPlaceEdit, [ { type:'type', label: 'Editor', name: 'editorField'} ]); }, '0.1.1', { requires:["anim","inputex-field","inputex-button"] })
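Since the module registers the `inplaceedit` type and documents the `editorField`, `visu`, and `animColors` options, a usage sketch follows. The `inputEx(...)` factory call and the `parentEl` option are assumptions about the surrounding inputEx API, not something this file defines:

```javascript
YUI().use("inputex-inplaceedit", function (Y) {
    // Assumed factory-style instantiation of the registered type.
    var field = inputEx({
        type: "inplaceedit",
        parentEl: "demo-container",             // id of an existing DOM node
        value: "Click to edit me",
        editorField: { type: "string" },        // field shown in edit mode
        animColors: { from: "#ffff99", to: "#ffffff" }
    });

    // "updated" fires shortly after the editor is validated
    // (see the setTimeout in onOkEditor above).
    field.on("updated", function (newValue) {
        console.log("new value:", newValue);
    });
});
```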
'use strict'; /** * The basic http module, used to create the server. * * @link http://nodejs.org/api/http.html */ alchemy.use('http', 'http'); /** * This module contains utilities for handling and transforming file paths. * Almost all these methods perform only string transformations. * The file system is not consulted to check whether paths are valid. * * @link http://nodejs.org/api/path.html */ alchemy.use('path', 'path'); /** * File I/O is provided by simple wrappers around standard POSIX functions. * * @link http://nodejs.org/api/fs.html */ alchemy.use('graceful-fs', 'fs'); /** * Useful utilities. * * @link http://nodejs.org/api/util.html */ alchemy.use('util', 'util'); /** * The native mongodb library * * @link https://npmjs.org/package/mongodb */ alchemy.use('mongodb', 'mongodb'); /** * The LESS interpreter. * * @link https://npmjs.org/package/less */ alchemy.use('less', 'less'); /** * Hawkejs view engine * * @link https://npmjs.org/package/hawkejs */ alchemy.use('hawkejs', 'hawkejs'); alchemy.hawkejs = new Classes.Hawkejs.Hawkejs; /** * The function to detect when everything is too busy */ alchemy.toobusy = alchemy.use('toobusy-js', 'toobusy'); // If the config is a number, use that as the lag threshold if (typeof alchemy.settings.toobusy === 'number') { alchemy.toobusy.maxLag(alchemy.settings.toobusy); } /** * Load Sputnik, the stage-based launcher */ alchemy.sputnik = new (alchemy.use('sputnik', 'sputnik'))(); /** * Real-time apps made cross-browser & easy with a WebSocket-like API. * * @link https://npmjs.org/package/socket.io */ alchemy.use('socket.io', 'io'); /** * Recursively mkdir, like `mkdir -p`. * This is a requirement fetched from express * * @link https://npmjs.org/package/mkdirp */ alchemy.use('mkdirp', 'mkdirp'); /** * Base useragent library * * @link https://npmjs.org/package/useragent */ alchemy.use('useragent'); /** * Enable the `satisfies` method in the `useragent` library * * @link https://www.npmjs.com/package/useragent#adding-more-features-to-the-useragent */ require('useragent/features');
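For readers unfamiliar with Alchemy, the repeated `alchemy.use(name, alias)` calls above follow a register-and-alias pattern. This is an illustrative sketch of what such a helper could look like, not Alchemy's actual implementation:

```javascript
// Hypothetical use(name, alias): require an npm module once and cache it
// under a short alias, mirroring calls like use('graceful-fs', 'fs').
var modules = {};

function use(name, alias) {
    alias = alias || name;
    if (!modules[alias]) {
        modules[alias] = require(name); // loaded once, then cached
    }
    return modules[alias];
}

// use('http', 'http');  use('socket.io', 'io');  use('mkdirp', 'mkdirp');
```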
// Copyright (c) 2016-2020 The ZCash developers // Copyright (c) 2020 The PIVX developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. /* See the Zcash protocol specification for more information. https://github.com/zcash/zips/blob/master/protocol/protocol.pdf */ #ifndef ZC_NOTE_ENCRYPTION_H_ #define ZC_NOTE_ENCRYPTION_H_ #include "uint256.h" #include "sapling/address.h" #include "sapling/sapling.h" #include <array> namespace libzcash { // Ciphertext for the recipient to decrypt typedef std::array<unsigned char, ZC_SAPLING_ENCCIPHERTEXT_SIZE> SaplingEncCiphertext; typedef std::array<unsigned char, ZC_SAPLING_ENCPLAINTEXT_SIZE> SaplingEncPlaintext; // Ciphertext for outgoing viewing key to decrypt typedef std::array<unsigned char, ZC_SAPLING_OUTCIPHERTEXT_SIZE> SaplingOutCiphertext; typedef std::array<unsigned char, ZC_SAPLING_OUTPLAINTEXT_SIZE> SaplingOutPlaintext; //! This is not a thread-safe API. class SaplingNoteEncryption { protected: // Ephemeral public key uint256 epk; // Ephemeral secret key uint256 esk; bool already_encrypted_enc; bool already_encrypted_out; SaplingNoteEncryption(uint256 epk, uint256 esk) : epk(epk), esk(esk), already_encrypted_enc(false), already_encrypted_out(false) { } public: static boost::optional<SaplingNoteEncryption> FromDiversifier(diversifier_t d); boost::optional<SaplingEncCiphertext> encrypt_to_recipient( const uint256 &pk_d, const SaplingEncPlaintext &message ); SaplingOutCiphertext encrypt_to_ourselves( const uint256 &ovk, const uint256 &cv, const uint256 &cm, const SaplingOutPlaintext &message ); uint256 get_epk() const { return epk; } uint256 get_esk() const { return esk; } }; // Attempts to decrypt a Sapling note. This will not check that the contents // of the ciphertext are correct. boost::optional<SaplingEncPlaintext> AttemptSaplingEncDecryption( const SaplingEncCiphertext &ciphertext, const uint256 &ivk, const uint256 &epk ); // Attempts to decrypt a Sapling note using outgoing plaintext. // This will not check that the contents of the ciphertext are correct. boost::optional<SaplingEncPlaintext> AttemptSaplingEncDecryption ( const SaplingEncCiphertext &ciphertext, const uint256 &epk, const uint256 &esk, const uint256 &pk_d ); // Attempts to decrypt a Sapling note. This will not check that the contents // of the ciphertext are correct. boost::optional<SaplingOutPlaintext> AttemptSaplingOutDecryption( const SaplingOutCiphertext &ciphertext, const uint256 &ovk, const uint256 &cv, const uint256 &cm, const uint256 &epk ); } #endif /* ZC_NOTE_ENCRYPTION_H_ */
---
layout: post
title: More Office Interop in PowerShell
---

As part of our team's workflow we create various data files and then generate tracking issues that we import into our issue tracking system. We have a semi-automated process to do this which works fairly well, but for some older issues we had imported I noticed that a vital piece of information was missing. When we ingest the issues into the system, we save an identifier into the issue tracking system so we can find the corresponding records in our data files later. We also generate some reports from our data files, one of which is an Excel spreadsheet that contains both the issue identifier and the information that was missing from the issue tracking system.

Since there were hundreds of issues that needed updating, I didn't want to update all of the issues in the issue tracking system manually. The issue tracking system allowed me to create a query and then download a CSV of the issues that were missing the data. Then I found the spreadsheets that had the data and wrote the following PowerShell script to generate a CSV file with the missing data mapped to the issue identifiers:

```powershell
param(
    [Parameter(Mandatory)][string]$issuesCsv,
    [Parameter(Mandatory)][string]$excelReport
)

Add-Type -AssemblyName Microsoft.Office.Interop.Excel

function Get-IssueData {
    param(
        [Parameter(Mandatory)]$workbook,
        [Parameter(Mandatory)][PSCustomObject[]]$issues
    )

    $issueData = @()
    foreach ($issue in $issues) {
        if (-not $issue.IssueId) {
            continue
        }
        foreach ($worksheet in $workbook.Worksheets) {
            # Search each sheet's used range for the issue identifier.
            $target = $worksheet.UsedRange.Find($issue.IssueId)
            if ($target) {
                $csvIssue = [PSCustomObject]@{
                    IssueId          = $issue.IssueId
                    MissingFieldData = $target.EntireRow.Value2[1, 5]
                }
                $issueData += $csvIssue
                break
            }
        }
    }
    return $issueData
}

try {
    $issues = Import-Csv -Path $issuesCsv
} catch {
    "Unable to import issues."
    exit 1
}

$application = New-Object -ComObject Excel.Application

try {
    $workbook = $application.Workbooks.Open($ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath($excelReport))
} catch {
    "Unable to open workbook."
    $application.Quit()
    exit 1
}

Get-IssueData $workbook $issues | Export-Csv -Path export.csv -NoTypeInformation

$workbook.Close($false)
$application.Quit()
```
.loading { margin: 0 auto; width: 100px; padding-top: 50px; } /*! * Load Awesome v1.1.0 (http://github.danielcardoso.net/load-awesome/) * Copyright 2015 Daniel Cardoso <@DanielCardoso> * Licensed under MIT */ .la-ball-fussion, .la-ball-fussion > div { position: relative; -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; } .la-ball-fussion { display: block; font-size: 0; color: #fff; } .la-ball-fussion.la-dark { color: #333; } .la-ball-fussion > div { display: inline-block; float: none; background-color: currentColor; border: 0 solid currentColor; } .la-ball-fussion { width: 8px; height: 8px; } .la-ball-fussion > div { position: absolute; width: 12px; height: 12px; border-radius: 100%; -webkit-transform: translate(-50%, -50%); -moz-transform: translate(-50%, -50%); -ms-transform: translate(-50%, -50%); -o-transform: translate(-50%, -50%); transform: translate(-50%, -50%); -webkit-animation: ball-fussion-ball1 1s 0s ease infinite; -moz-animation: ball-fussion-ball1 1s 0s ease infinite; -o-animation: ball-fussion-ball1 1s 0s ease infinite; animation: ball-fussion-ball1 1s 0s ease infinite; } .la-ball-fussion > div:nth-child(1) { top: 0; left: 50%; z-index: 1; } .la-ball-fussion > div:nth-child(2) { top: 50%; left: 100%; z-index: 2; -webkit-animation-name: ball-fussion-ball2; -moz-animation-name: ball-fussion-ball2; -o-animation-name: ball-fussion-ball2; animation-name: ball-fussion-ball2; } .la-ball-fussion > div:nth-child(3) { top: 100%; left: 50%; z-index: 1; -webkit-animation-name: ball-fussion-ball3; -moz-animation-name: ball-fussion-ball3; -o-animation-name: ball-fussion-ball3; animation-name: ball-fussion-ball3; } .la-ball-fussion > div:nth-child(4) { top: 50%; left: 0; z-index: 2; -webkit-animation-name: ball-fussion-ball4; -moz-animation-name: ball-fussion-ball4; -o-animation-name: ball-fussion-ball4; animation-name: ball-fussion-ball4; } .la-ball-fussion.la-sm { width: 4px; height: 4px; } .la-ball-fussion.la-sm > div { width: 6px; height: 6px; } .la-ball-fussion.la-2x { width: 16px; height: 16px; } .la-ball-fussion.la-2x > div { width: 24px; height: 24px; } .la-ball-fussion.la-3x { width: 24px; height: 24px; } .la-ball-fussion.la-3x > div { width: 36px; height: 36px; } /* * Animations */ @-webkit-keyframes ball-fussion-ball1 { 0% { opacity: .35; } 50% { top: -100%; left: 200%; opacity: 1; } 100% { top: 50%; left: 100%; z-index: 2; opacity: .35; } } @-moz-keyframes ball-fussion-ball1 { 0% { opacity: .35; } 50% { top: -100%; left: 200%; opacity: 1; } 100% { top: 50%; left: 100%; z-index: 2; opacity: .35; } } @-o-keyframes ball-fussion-ball1 { 0% { opacity: .35; } 50% { top: -100%; left: 200%; opacity: 1; } 100% { top: 50%; left: 100%; z-index: 2; opacity: .35; } } @keyframes ball-fussion-ball1 { 0% { opacity: .35; } 50% { top: -100%; left: 200%; opacity: 1; } 100% { top: 50%; left: 100%; z-index: 2; opacity: .35; } } @-webkit-keyframes ball-fussion-ball2 { 0% { opacity: .35; } 50% { top: 200%; left: 200%; opacity: 1; } 100% { top: 100%; left: 50%; z-index: 1; opacity: .35; } } @-moz-keyframes ball-fussion-ball2 { 0% { opacity: .35; } 50% { top: 200%; left: 200%; opacity: 1; } 100% { top: 100%; left: 50%; z-index: 1; opacity: .35; } } @-o-keyframes ball-fussion-ball2 { 0% { opacity: .35; } 50% { top: 200%; left: 200%; opacity: 1; } 100% { top: 100%; left: 50%; z-index: 1; opacity: .35; } } @keyframes ball-fussion-ball2 { 0% { opacity: .35; } 50% { top: 200%; left: 200%; opacity: 1; } 100% { top: 100%; left: 50%; z-index: 1; opacity: .35; } } 
@-webkit-keyframes ball-fussion-ball3 { 0% { opacity: .35; } 50% { top: 200%; left: -100%; opacity: 1; } 100% { top: 50%; left: 0; z-index: 2; opacity: .35; } } @-moz-keyframes ball-fussion-ball3 { 0% { opacity: .35; } 50% { top: 200%; left: -100%; opacity: 1; } 100% { top: 50%; left: 0; z-index: 2; opacity: .35; } } @-o-keyframes ball-fussion-ball3 { 0% { opacity: .35; } 50% { top: 200%; left: -100%; opacity: 1; } 100% { top: 50%; left: 0; z-index: 2; opacity: .35; } } @keyframes ball-fussion-ball3 { 0% { opacity: .35; } 50% { top: 200%; left: -100%; opacity: 1; } 100% { top: 50%; left: 0; z-index: 2; opacity: .35; } } @-webkit-keyframes ball-fussion-ball4 { 0% { opacity: .35; } 50% { top: -100%; left: -100%; opacity: 1; } 100% { top: 0; left: 50%; z-index: 1; opacity: .35; } } @-moz-keyframes ball-fussion-ball4 { 0% { opacity: .35; } 50% { top: -100%; left: -100%; opacity: 1; } 100% { top: 0; left: 50%; z-index: 1; opacity: .35; } } @-o-keyframes ball-fussion-ball4 { 0% { opacity: .35; } 50% { top: -100%; left: -100%; opacity: 1; } 100% { top: 0; left: 50%; z-index: 1; opacity: .35; } } @keyframes ball-fussion-ball4 { 0% { opacity: .35; } 50% { top: -100%; left: -100%; opacity: 1; } 100% { top: 0; left: 50%; z-index: 1; opacity: .35; } }
package logbook.data; import java.io.BufferedReader; import java.io.Closeable; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; import java.net.URLClassLoader; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; import javax.annotation.CheckForNull; import javax.script.Invocable; import javax.script.ScriptEngine; import javax.script.ScriptEngineManager; import javax.script.ScriptException; import logbook.config.AppConfig; import org.apache.commons.io.FilenameUtils; /** * Loads scripts and obtains their EventListener implementations * */ public final class ScriptLoader implements Closeable { /** ClassLoader */ private final URLClassLoader classLoader; /** ScriptEngineManager */ private final ScriptEngineManager manager; /** * Constructor */ public ScriptLoader() { this.classLoader = URLClassLoader.newInstance(this.getLibraries()); this.manager = new ScriptEngineManager(this.classLoader); } /** * Loads a script and obtains its EventListener implementation<br> * * @param script the script * @return the EventListener implemented by the script, or null if no script engine is found or the evaluated script does not implement EventListener * @throws IOException * @throws ScriptException */ @CheckForNull public EventListener getEventListener(Path script) throws IOException, ScriptException { try (BufferedReader reader = Files.newBufferedReader(script, StandardCharsets.UTF_8)) { // Get a ScriptEngine for the file extension String ext = FilenameUtils.getExtension(script.toString()); ScriptEngine engine = this.manager.getEngineByExtension(ext); if (engine != null) { // eval engine.eval(reader); // Obtain the implementation EventListener listener = ((Invocable) engine).getInterface(EventListener.class); if (listener != null) { return new ScriptEventAdapter(listener, script); } } return null; } } /** * Additional libraries to use with the ScriptEngineManager * * @return the libraries */ public URL[] getLibraries() { String[] engines = AppConfig.get().getScriptEngines(); List<URL> libs = new ArrayList<>(); for (String engine : engines) { Path path = Paths.get(engine); if (Files.isReadable(path)) { try { libs.add(path.toUri().toURL()); } catch (MalformedURLException e) { // This branch should be unreachable e.printStackTrace(); } } } return libs.toArray(new URL[libs.size()]); } @Override public void close() throws IOException { this.classLoader.close(); } }
{% extends "layout.html" %} {% block body %} <title>All Events - Media Services</title> <form id="adminForm" action="" method=post> <div class="container"> <table class="table"> <thead> <td> <ul class="nav nav-pills"> <li class="nav-item"> <a class="nav-link active" href="#">Upcoming Events</a> </li> <li class="nav-item"> <a class="nav-link" href="{{ url_for('past') }}">Past Events</a> </li> </ul> </td> <td> <button type="button" class="btn btn-outline-secondary" onclick="toggleSignUps()"> <span id="signUpText" class="text-muted"> Please Wait...</span> </button> </td> <td style="text-align:right"> <a href="{{ url_for('new') }}" class="btn btn-success"> <i class="fa fa-plus" aria-hidden="true"></i> New Event </a> </td> </thead> </table> <table class="table table-hover"> {% block events %}{% endblock %} </table> <!-- bottom buttons --> </div> </form> <script> currentPage = "edit"; $(window).on('load', function(){ socket.emit("getSignUps") }) function lockEvent(event) { socket.emit("lockEvent", String(event)); document.getElementById("lock_"+event).innerHTML = "Please Wait..."; } function toggleSignUps() { socket.emit("toggleSignUps"); document.getElementById("signUpText").innerHTML = "Please Wait..."; } socket.on('eventLock', function(data) { if (data.locked) { document.getElementById("lock_"+data.event).setAttribute("class", "btn btn-sm btn-danger"); document.getElementById("lock_"+data.event).innerHTML = "<i class=\"fa fa-lock\"> </i> Locked"; } else { document.getElementById("lock_"+data.event).setAttribute("class", "btn btn-sm btn-default"); document.getElementById("lock_"+data.event).innerHTML = "<i class=\"fa fa-unlock\"> </i> Unlocked"; } }); socket.on('signUpsAvailable', function(data) { if (data.available) { document.getElementById("signUpText").setAttribute("class", "text-success"); document.getElementById("signUpText").innerHTML = "<i id=\"signUpIcon\" class=\"fa fa-toggle-on\" aria-hidden=\"true\"></i> Sign-Ups Open"; } else { document.getElementById("signUpText").setAttribute("class", "text-danger"); document.getElementById("signUpText").innerHTML = "<i id=\"signUpIcon\" class=\"fa fa-toggle-off\" aria-hidden=\"true\"></i> Sign-Ups Closed"; } }); </script> {% endblock %}
/* * Read and write JSON. * * Copyright (c) 2014 Marko Kreen * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <usual/json.h> #include <usual/cxextra.h> #include <usual/cbtree.h> #include <usual/misc.h> #include <usual/utf8.h> #include <usual/ctype.h> #include <usual/bytemap.h> #include <usual/string.h> #include <math.h> #define TYPE_BITS 3 #define TYPE_MASK ((1 << TYPE_BITS) - 1) #define UNATTACHED ((struct JsonValue *)(1 << TYPE_BITS)) #define JSON_MAX_KEY (1024*1024) #define NUMBER_BUF 100 #define JSON_MAXINT ((1LL << 53) - 1) #define JSON_MININT (-(1LL << 53) + 1) /* * Common struct for all JSON values */ struct JsonValue { /* actual value for simple types */ union { double v_float; /* float */ int64_t v_int; /* int */ bool v_bool; /* bool */ size_t v_size; /* str/list/dict */ } u; /* pointer to next elem and type in low bits */ uintptr_t v_next_and_type; }; /* * List container. */ struct ValueList { struct JsonValue *first; struct JsonValue *last; struct JsonValue **array; }; /* * Extra data for list/dict. */ struct JsonContainer { /* parent container */ struct JsonValue *c_parent; /* main context for child alloc */ struct JsonContext *c_ctx; /* child elements */ union { struct CBTree *c_dict; struct ValueList c_list; } u; }; #define DICT_EXTRA (offsetof(struct JsonContainer, u.c_dict) + sizeof(struct CBTree *)) #define LIST_EXTRA (sizeof(struct JsonContainer)) /* * Allocation context. */ struct JsonContext { CxMem *pool; unsigned int options; /* parse state */ struct JsonValue *parent; struct JsonValue *cur_key; struct JsonValue *top; const char *lasterr; char errbuf[128]; int64_t linenr; }; struct RenderState { struct MBuf *dst; unsigned int options; }; /* * Parser states */ enum ParseState { S_INITIAL_VALUE = 1, S_LIST_VALUE, S_LIST_VALUE_OR_CLOSE, S_LIST_COMMA_OR_CLOSE, S_DICT_KEY, S_DICT_KEY_OR_CLOSE, S_DICT_COLON, S_DICT_VALUE, S_DICT_COMMA_OR_CLOSE, S_PARENT, S_DONE, MAX_STATES, }; /* * Tokens that change state. */ enum TokenTypes { T_STRING, T_OTHER, T_COMMA, T_COLON, T_OPEN_DICT, T_OPEN_LIST, T_CLOSE_DICT, T_CLOSE_LIST, MAX_TOKENS }; /* * 4-byte ints for small string tokens. */ #define C_NULL FOURCC('n','u','l','l') #define C_TRUE FOURCC('t','r','u','e') #define C_ALSE FOURCC('a','l','s','e') /* * Signature for render functions. 
*/ typedef bool (*render_func_t)(struct RenderState *rs, struct JsonValue *jv); static bool render_any(struct RenderState *rs, struct JsonValue *jv); /* * Header manipulation */ static inline enum JsonValueType get_type(struct JsonValue *jv) { return jv->v_next_and_type & TYPE_MASK; } static inline bool has_type(struct JsonValue *jv, enum JsonValueType type) { if (!jv) return false; return get_type(jv) == type; } static inline struct JsonValue *get_next(struct JsonValue *jv) { return (struct JsonValue *)(jv->v_next_and_type & ~(uintptr_t)TYPE_MASK); } static inline void set_next(struct JsonValue *jv, struct JsonValue *next) { jv->v_next_and_type = (uintptr_t)next | get_type(jv); } static inline bool is_unattached(struct JsonValue *jv) { return get_next(jv) == UNATTACHED; } static inline void *get_extra(struct JsonValue *jv) { return (void *)(jv + 1); } static inline char *get_cstring(struct JsonValue *jv) { enum JsonValueType type = get_type(jv); if (type != JSON_STRING) return NULL; return get_extra(jv); } /* * Collection header manipulation. */ static inline struct JsonContainer *get_container(struct JsonValue *jv) { enum JsonValueType type = get_type(jv); if (type != JSON_DICT && type != JSON_LIST) return NULL; return get_extra(jv); } static inline void set_parent(struct JsonValue *jv, struct JsonValue *parent) { struct JsonContainer *c = get_container(jv); if (c) c->c_parent = parent; } static inline struct JsonContext *get_context(struct JsonValue *jv) { struct JsonContainer *c = get_container(jv); return c ? c->c_ctx : NULL; } static inline struct CBTree *get_dict_tree(struct JsonValue *jv) { struct JsonContainer *c; if (has_type(jv, JSON_DICT)) { c = get_container(jv); return c->u.c_dict; } return NULL; } static inline struct ValueList *get_list_vlist(struct JsonValue *jv) { struct JsonContainer *c; if (has_type(jv, JSON_LIST)) { c = get_container(jv); return &c->u.c_list; } return NULL; } /* * Random helpers */ /* copy and return final pointer */ static inline char *plain_copy(char *dst, const char *src, const char *endptr) { if (src < endptr) { memcpy(dst, src, endptr - src); return dst + (endptr - src); } return dst; } /* error message on context */ _PRINTF(2,0) static void format_err(struct JsonContext *ctx, const char *errmsg, va_list ap) { char buf[119]; if (ctx->lasterr) return; vsnprintf(buf, sizeof(buf), errmsg, ap); snprintf(ctx->errbuf, sizeof(ctx->errbuf), "Line #%" PRIi64 ": %s", ctx->linenr, buf); ctx->lasterr = ctx->errbuf; } /* set message and return false */ _PRINTF(2,3) static bool err_false(struct JsonContext *ctx, const char *errmsg, ...) { va_list ap; va_start(ap, errmsg); format_err(ctx, errmsg, ap); va_end(ap); return false; } /* set message and return NULL */ _PRINTF(2,3) static void *err_null(struct JsonContext *ctx, const char *errmsg, ...) 
{ va_list ap; va_start(ap, errmsg); format_err(ctx, errmsg, ap); va_end(ap); return NULL; } /* callback for cbtree, returns key bytes */ static size_t get_key_data_cb(void *dictptr, void *keyptr, const void **dst_p) { struct JsonValue *key = keyptr; *dst_p = get_cstring(key); return key->u.v_size; } /* add element to list */ static void real_list_append(struct JsonValue *list, struct JsonValue *elem) { struct ValueList *vlist; vlist = get_list_vlist(list); if (vlist->last) { set_next(vlist->last, elem); } else { vlist->first = elem; } vlist->last = elem; vlist->array = NULL; list->u.v_size++; } /* add key to tree */ static bool real_dict_add_key(struct JsonContext *ctx, struct JsonValue *dict, struct JsonValue *key) { struct CBTree *tree; tree = get_dict_tree(dict); if (!tree) return err_false(ctx, "Expect dict"); if (json_value_size(key) > JSON_MAX_KEY) return err_false(ctx, "Too large key"); dict->u.v_size++; if (!cbtree_insert(tree, key)) return err_false(ctx, "Key insertion failed"); return true; } /* create basic value struct, link to structures */ static struct JsonValue *mk_value(struct JsonContext *ctx, enum JsonValueType type, size_t extra, bool attach) { struct JsonValue *val; struct JsonContainer *col = NULL; if (!ctx) return NULL; val = cx_alloc(ctx->pool, sizeof(struct JsonValue) + extra); if (!val) return err_null(ctx, "No memory"); if ((uintptr_t)val & TYPE_MASK) return err_null(ctx, "Unaligned pointer"); /* initial value */ val->v_next_and_type = type; val->u.v_int = 0; if (type == JSON_DICT || type == JSON_LIST) { col = get_container(val); col->c_ctx = ctx; col->c_parent = NULL; if (type == JSON_DICT) { col->u.c_dict = cbtree_create(get_key_data_cb, NULL, val, ctx->pool); if (!col->u.c_dict) return err_null(ctx, "No memory"); } else { memset(&col->u.c_list, 0, sizeof(col->u.c_list)); } } /* independent JsonValue?
*/ if (!attach) { set_next(val, UNATTACHED); return val; } /* attach to parent */ if (col) col->c_parent = ctx->parent; /* attach to previous value */ if (has_type(ctx->parent, JSON_DICT)) { if (ctx->cur_key) { set_next(ctx->cur_key, val); ctx->cur_key = NULL; } else { ctx->cur_key = val; } } else if (has_type(ctx->parent, JSON_LIST)) { real_list_append(ctx->parent, val); } else if (!ctx->top) { ctx->top = val; } else { return err_null(ctx, "Only one top element is allowed"); } return val; } static void prepare_array(struct JsonValue *list) { struct JsonContainer *c; struct JsonValue *val; struct ValueList *vlist; size_t i; vlist = get_list_vlist(list); if (vlist->array) return; c = get_container(list); vlist->array = cx_alloc(c->c_ctx->pool, list->u.v_size * sizeof(struct JsonValue *)); if (!vlist->array) return; val = vlist->first; for (i = 0; i < list->u.v_size && val; i++) { vlist->array[i] = val; val = get_next(val); } } /* * Parsing code starts */ /* create and change context */ static bool open_container(struct JsonContext *ctx, enum JsonValueType type, unsigned int extra) { struct JsonValue *jv; jv = mk_value(ctx, type, extra, true); if (!jv) return false; ctx->parent = jv; ctx->cur_key = NULL; return true; } /* close and change context */ static enum ParseState close_container(struct JsonContext *ctx, enum ParseState state) { struct JsonContainer *c; if (state != S_PARENT) return (int)err_false(ctx, "close_container bug"); c = get_container(ctx->parent); if (!c) return (int)err_false(ctx, "invalid parent"); ctx->parent = c->c_parent; ctx->cur_key = NULL; if (has_type(ctx->parent, JSON_DICT)) { return S_DICT_COMMA_OR_CLOSE; } else if (has_type(ctx->parent, JSON_LIST)) { return S_LIST_COMMA_OR_CLOSE; } return S_DONE; } /* parse 4-char token */ static bool parse_char4(struct JsonContext *ctx, const char **src_p, const char *end, uint32_t t_exp, enum JsonValueType type, bool val) { const char *src; uint32_t t_got; struct JsonValue *jv; src = *src_p; if (src + 4 > end) return err_false(ctx, "Unexpected end of token"); memcpy(&t_got, src, 4); if (t_exp != t_got) return err_false(ctx, "Invalid token"); jv = mk_value(ctx, type, 0, true); if (!jv) return false; jv->u.v_bool = val; *src_p += 4; return true; } /* parse int or float */ static bool parse_number(struct JsonContext *ctx, const char **src_p, const char *end) { const char *start, *src; enum JsonValueType type = JSON_INT; char *tokend = NULL; char buf[NUMBER_BUF]; size_t len; struct JsonValue *jv; double v_float = 0; int64_t v_int = 0; /* scan & copy */ start = src = *src_p; for (; src < end; src++) { if (*src >= '0' && *src <= '9') { } else if (*src == '+' || *src == '-') { } else if (*src == '.' 
|| *src == 'e' || *src == 'E') { type = JSON_FLOAT; } else { break; } } len = src - start; if (len >= NUMBER_BUF) goto failed; memcpy(buf, start, len); buf[len] = 0; /* now parse */ errno = 0; tokend = buf; if (type == JSON_FLOAT) { v_float = strtod_dot(buf, &tokend); if (*tokend != 0 || errno || !isfinite(v_float)) goto failed; } else if (len < 8) { v_int = strtol(buf, &tokend, 10); if (*tokend != 0 || errno) goto failed; } else { v_int = strtoll(buf, &tokend, 10); if (*tokend != 0 || errno || v_int < JSON_MININT || v_int > JSON_MAXINT) goto failed; } /* create value struct */ jv = mk_value(ctx, type, 0, true); if (!jv) return false; if (type == JSON_FLOAT) { jv->u.v_float = v_float; } else { jv->u.v_int = v_int; } *src_p = src; return true; failed: if (!errno) errno = EINVAL; return err_false(ctx, "Number parse failed"); } /* * String parsing */ static int parse_hex(const char *s, const char *end) { int v = 0, c, i, x; if (s + 4 > end) return -1; for (i = 0; i < 4; i++) { c = s[i]; if (c >= '0' && c <= '9') { x = c - '0'; } else if (c >= 'a' && c <= 'f') { x = c - 'a' + 10; } else if (c >= 'A' && c <= 'F') { x = c - 'A' + 10; } else { return -1; } v = (v << 4) | x; } return v; } /* process \uXXXX escapes, merge surrogates */ static bool parse_uescape(struct JsonContext *ctx, char **dst_p, char *dstend, const char **src_p, const char *end) { int c, c2; const char *src = *src_p; c = parse_hex(src, end); if (c <= 0) return err_false(ctx, "Invalid hex escape"); src += 4; if (c >= 0xD800 && c <= 0xDFFF) { /* first surrogate */ if (c >= 0xDC00) return err_false(ctx, "Invalid UTF16 escape"); if (src + 6 > end) return err_false(ctx, "Invalid UTF16 escape"); /* second surrogate */ if (src[0] != '\\' || src[1] != 'u') return err_false(ctx, "Invalid UTF16 escape"); c2 = parse_hex(src + 2, end); if (c2 < 0xDC00 || c2 > 0xDFFF) return err_false(ctx, "Invalid UTF16 escape"); c = 0x10000 + ((c & 0x3FF) << 10) + (c2 & 0x3FF); src += 6; } /* now write char */ if (!utf8_put_char(c, dst_p, dstend)) return err_false(ctx, "Invalid UTF16 escape"); *src_p = src; return true; } #define meta_string(c) (((c) == '"' || (c) == '\\' || (c) == '\0' || \ (c) == '\n' || ((c) & 0x80) != 0) ? 
1 : 0) static const uint8_t string_examine_chars[] = INTMAP256_CONST(meta_string); /* look for string end, validate contents */ static bool scan_string(struct JsonContext *ctx, const char *src, const char *end, const char **str_end_p, bool *hasesc_p, int64_t *nlines_p) { bool hasesc = false; int64_t lines = 0; unsigned int n; bool check_utf8 = true; if (ctx->options & JSON_PARSE_IGNORE_ENCODING) check_utf8 = false; while (src < end) { if (!string_examine_chars[(uint8_t)*src]) { src++; } else if (*src == '"') { /* string end */ *hasesc_p = hasesc; *str_end_p = src; *nlines_p = lines; return true; } else if (*src == '\\') { hasesc = true; src++; if (src < end && (*src == '\\' || *src == '"')) src++; } else if (*src & 0x80) { n = utf8_validate_seq(src, end); if (n) { src += n; } else if (check_utf8) { goto badutf; } else { src++; } } else if (*src == '\n') { lines++; src++; } else { goto badutf; } } return err_false(ctx, "Unexpected end of string"); badutf: return err_false(ctx, "Invalid UTF8 sequence"); } /* string boundaries are known, copy and unescape */ static char *process_escapes(struct JsonContext *ctx, const char *src, const char *end, char *dst, char *dstend) { const char *esc; /* process escapes */ while (src < end) { esc = memchr(src, '\\', end - src); if (!esc) { dst = plain_copy(dst, src, end); break; } dst = plain_copy(dst, src, esc); src = esc + 1; switch (*src++) { case '"': *dst++ = '"'; break; case '\\': *dst++ = '\\'; break; case '/': *dst++ = '/'; break; case 'b': *dst++ = '\b'; break; case 'f': *dst++ = '\f'; break; case 'n': *dst++ = '\n'; break; case 'r': *dst++ = '\r'; break; case 't': *dst++ = '\t'; break; case 'u': if (!parse_uescape(ctx, &dst, dstend, &src, end)) return NULL; break; default: return err_null(ctx, "Invalid escape code"); } } return dst; } /* 2-phase string processing */ static bool parse_string(struct JsonContext *ctx, const char **src_p, const char *end) { const char *start, *strend = NULL; bool hasesc = false; char *dst, *dstend; size_t len; struct JsonValue *jv; int64_t lines = 0; /* find string boundaries, validate */ start = *src_p; if (!scan_string(ctx, start, end, &strend, &hasesc, &lines)) return false; /* create value struct */ len = strend - start; jv = mk_value(ctx, JSON_STRING, len + 1, true); if (!jv) return false; dst = get_cstring(jv); dstend = dst + len; /* copy & process escapes */ if (hasesc) { dst = process_escapes(ctx, start, strend, dst, dstend); if (!dst) return false; } else { dst = plain_copy(dst, start, strend); } *dst = '\0'; jv->u.v_size = dst - get_cstring(jv); ctx->linenr += lines; *src_p = strend + 1; return true; } /* * Helpers for relaxed parsing */ static bool skip_comment(struct JsonContext *ctx, const char **src_p, const char *end) { const char *s, *start; char c; size_t lnr; s = start = *src_p; if (s >= end) return false; c = *s++; if (c == '/') { s = memchr(s, '\n', end - s); if (s) { ctx->linenr++; *src_p = s + 1; } else { *src_p = end; } return true; } else if (c == '*') { for (lnr = 0; s + 2 <= end; s++) { if (s[0] == '*' && s[1] == '/') { ctx->linenr += lnr; *src_p = s + 2; return true; } else if (s[0] == '\n') { lnr++; } } } return false; } static bool skip_extra_comma(struct JsonContext *ctx, const char **src_p, const char *end, enum ParseState state) { bool skip = false; const char *src = *src_p; while (src < end && isspace(*src)) { if (*src == '\n') ctx->linenr++; src++; } if (src < end) { if (*src == '}') { if (state == S_DICT_COMMA_OR_CLOSE || state == S_DICT_KEY_OR_CLOSE) skip = true; } else if (*src == 
']') { if (state == S_LIST_COMMA_OR_CLOSE || state == S_LIST_VALUE_OR_CLOSE) skip = true; } } *src_p = src; return skip; } /* * Main parser */ /* oldstate + token -> newstate */ static const unsigned char STATE_STEPS[MAX_STATES][MAX_TOKENS] = { [S_INITIAL_VALUE] = { [T_OPEN_LIST] = S_LIST_VALUE_OR_CLOSE, [T_OPEN_DICT] = S_DICT_KEY_OR_CLOSE, [T_STRING] = S_DONE, [T_OTHER] = S_DONE }, [S_LIST_VALUE] = { [T_OPEN_LIST] = S_LIST_VALUE_OR_CLOSE, [T_OPEN_DICT] = S_DICT_KEY_OR_CLOSE, [T_STRING] = S_LIST_COMMA_OR_CLOSE, [T_OTHER] = S_LIST_COMMA_OR_CLOSE }, [S_LIST_VALUE_OR_CLOSE] = { [T_OPEN_LIST] = S_LIST_VALUE_OR_CLOSE, [T_OPEN_DICT] = S_DICT_KEY_OR_CLOSE, [T_STRING] = S_LIST_COMMA_OR_CLOSE, [T_OTHER] = S_LIST_COMMA_OR_CLOSE, [T_CLOSE_LIST] = S_PARENT }, [S_LIST_COMMA_OR_CLOSE] = { [T_COMMA] = S_LIST_VALUE, [T_CLOSE_LIST] = S_PARENT }, [S_DICT_KEY] = { [T_STRING] = S_DICT_COLON }, [S_DICT_KEY_OR_CLOSE] = { [T_STRING] = S_DICT_COLON, [T_CLOSE_DICT] = S_PARENT }, [S_DICT_COLON] = { [T_COLON] = S_DICT_VALUE }, [S_DICT_VALUE] = { [T_OPEN_LIST] = S_LIST_VALUE_OR_CLOSE, [T_OPEN_DICT] = S_DICT_KEY_OR_CLOSE, [T_STRING] = S_DICT_COMMA_OR_CLOSE, [T_OTHER] = S_DICT_COMMA_OR_CLOSE }, [S_DICT_COMMA_OR_CLOSE] = { [T_COMMA] = S_DICT_KEY, [T_CLOSE_DICT] = S_PARENT }, }; #define MAPSTATE(state, tok) do { \ int newstate = STATE_STEPS[state][tok]; \ if (!newstate) \ return err_false(ctx, "Unexpected symbol: '%c'", c); \ state = newstate; \ } while (0) /* actual parser */ static bool parse_tokens(struct JsonContext *ctx, const char *src, const char *end) { char c; enum ParseState state = S_INITIAL_VALUE; bool relaxed = ctx->options & JSON_PARSE_RELAXED; while (src < end) { c = *src++; switch (c) { case '\n': ctx->linenr++; case ' ': case '\t': case '\r': case '\f': case '\v': /* common case - many spaces */ while (src < end && *src == ' ') src++; break; case '"': MAPSTATE(state, T_STRING); if (!parse_string(ctx, &src, end)) goto failed; break; case 'n': MAPSTATE(state, T_OTHER); src--; if (!parse_char4(ctx, &src, end, C_NULL, JSON_NULL, 0)) goto failed; continue; case 't': MAPSTATE(state, T_OTHER); src--; if (!parse_char4(ctx, &src, end, C_TRUE, JSON_BOOL, 1)) goto failed; break; case 'f': MAPSTATE(state, T_OTHER); if (!parse_char4(ctx, &src, end, C_ALSE, JSON_BOOL, 0)) goto failed; break; case '-': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': MAPSTATE(state, T_OTHER); src--; if (!parse_number(ctx, &src, end)) goto failed; break; case '[': MAPSTATE(state, T_OPEN_LIST); if (!open_container(ctx, JSON_LIST, LIST_EXTRA)) goto failed; break; case '{': MAPSTATE(state, T_OPEN_DICT); if (!open_container(ctx, JSON_DICT, DICT_EXTRA)) goto failed; break; case ']': MAPSTATE(state, T_CLOSE_LIST); state = close_container(ctx, state); if (!state) goto failed; break; case '}': MAPSTATE(state, T_CLOSE_DICT); state = close_container(ctx, state); if (!state) goto failed; break; case ':': MAPSTATE(state, T_COLON); if (!real_dict_add_key(ctx, ctx->parent, ctx->cur_key)) goto failed; break; case ',': if (relaxed && skip_extra_comma(ctx, &src, end, state)) continue; MAPSTATE(state, T_COMMA); break; case '/': if (relaxed && skip_comment(ctx, &src, end)) continue; /* fallthrough */ default: return err_false(ctx, "Invalid symbol: '%c'", c); } } if (state != S_DONE) return err_false(ctx, "Container still open"); return true; failed: return false; } /* parser public api */ struct JsonValue *json_parse(struct JsonContext *ctx, const char *json, size_t len) { const char *end = json + len; /* 
reset parser */ ctx->linenr = 1; ctx->parent = NULL; ctx->cur_key = NULL; ctx->lasterr = NULL; ctx->top = NULL; if (!parse_tokens(ctx, json, end)) return NULL; return ctx->top; } /* * Render value as JSON string. */ static bool render_null(struct RenderState *rs, struct JsonValue *jv) { return mbuf_write(rs->dst, "null", 4); } static bool render_bool(struct RenderState *rs, struct JsonValue *jv) { if (jv->u.v_bool) return mbuf_write(rs->dst, "true", 4); return mbuf_write(rs->dst, "false", 5); } static bool render_int(struct RenderState *rs, struct JsonValue *jv) { char buf[NUMBER_BUF]; int len; len = snprintf(buf, sizeof(buf), "%" PRIi64, jv->u.v_int); if (len < 0 || len >= NUMBER_BUF) return false; return mbuf_write(rs->dst, buf, len); } static bool render_float(struct RenderState *rs, struct JsonValue *jv) { char buf[NUMBER_BUF + 2]; int len; len = dtostr_dot(buf, NUMBER_BUF, jv->u.v_float); if (len < 0 || len >= NUMBER_BUF) return false; if (!memchr(buf, '.', len) && !memchr(buf, 'e', len)) { buf[len++] = '.'; buf[len++] = '0'; } return mbuf_write(rs->dst, buf, len); } static bool escape_char(struct MBuf *dst, unsigned int c) { char ec; char buf[10]; /* start escape */ if (!mbuf_write_byte(dst, '\\')) return false; /* escape same char */ if (c == '"' || c == '\\') return mbuf_write_byte(dst, c); /* low-ascii mess */ switch (c) { case '\b': ec = 'b'; break; case '\f': ec = 'f'; break; case '\n': ec = 'n'; break; case '\r': ec = 'r'; break; case '\t': ec = 't'; break; default: snprintf(buf, sizeof(buf), "u%04x", c); return mbuf_write(dst, buf, 5); } return mbuf_write_byte(dst, ec); } static bool render_string(struct RenderState *rs, struct JsonValue *jv) { const char *s, *last; const char *val = get_cstring(jv); size_t len = jv->u.v_size; const char *end = val + len; unsigned int c; /* start quote */ if (!mbuf_write_byte(rs->dst, '"')) return false; for (s = last = val; s < end; s++) { if (*s == '"' || *s == '\\' || (unsigned char)*s < 0x20 || /* Valid in JSON, but not in JS: \u2028 - Line separator \u2029 - Paragraph separator */ ((unsigned char)s[0] == 0xE2 && (unsigned char)s[1] == 0x80 && ((unsigned char)s[2] == 0xA8 || (unsigned char)s[2] == 0xA9))) { /* flush */ if (last < s) { if (!mbuf_write(rs->dst, last, s - last)) return false; } if ((unsigned char)s[0] == 0xE2) { c = 0x2028 + ((unsigned char)s[2] - 0xA8); last = s + 3; } else { c = (unsigned char)*s; last = s + 1; } /* output escaped char */ if (!escape_char(rs->dst, c)) return false; } } /* flush */ if (last < s) { if (!mbuf_write(rs->dst, last, s - last)) return false; } /* final quote */ if (!mbuf_write_byte(rs->dst, '"')) return false; return true; } /* * Render complex values */ struct ElemWriterState { struct RenderState *rs; char sep; }; static bool list_elem_writer(void *arg, struct JsonValue *elem) { struct ElemWriterState *state = arg; if (state->sep && !mbuf_write_byte(state->rs->dst, state->sep)) return false; state->sep = ','; return render_any(state->rs, elem); } static bool render_list(struct RenderState *rs, struct JsonValue *list) { struct ElemWriterState state; state.rs = rs; state.sep = 0; if (!mbuf_write_byte(rs->dst, '[')) return false; if (!json_list_iter(list, list_elem_writer, &state)) return false; if (!mbuf_write_byte(rs->dst, ']')) return false; return true; } static bool dict_elem_writer(void *ctx, struct JsonValue *key, struct JsonValue *val) { struct ElemWriterState *state = ctx; if (state->sep && !mbuf_write_byte(state->rs->dst, state->sep)) return false; state->sep = ','; if 
(!render_any(state->rs, key)) return false; if (!mbuf_write_byte(state->rs->dst, ':')) return false; return render_any(state->rs, val); } static bool render_dict(struct RenderState *rs, struct JsonValue *dict) { struct ElemWriterState state; state.rs = rs; state.sep = 0; if (!mbuf_write_byte(rs->dst, '{')) return false; if (!json_dict_iter(dict, dict_elem_writer, &state)) return false; if (!mbuf_write_byte(rs->dst, '}')) return false; return true; } static bool render_invalid(struct RenderState *rs, struct JsonValue *jv) { return false; } /* * Public api */ static bool render_any(struct RenderState *rs, struct JsonValue *jv) { static const render_func_t rfunc_map[] = { render_invalid, render_null, render_bool, render_int, render_float, render_string, render_list, render_dict, }; return rfunc_map[get_type(jv)](rs, jv); } bool json_render(struct MBuf *dst, struct JsonValue *jv) { struct RenderState rs; rs.dst = dst; rs.options = 0; return render_any(&rs, jv); } /* * Examine single value */ enum JsonValueType json_value_type(struct JsonValue *jv) { return get_type(jv); } size_t json_value_size(struct JsonValue *jv) { if (has_type(jv, JSON_STRING) || has_type(jv, JSON_LIST) || has_type(jv, JSON_DICT)) return jv->u.v_size; return 0; } bool json_value_as_bool(struct JsonValue *jv, bool *dst_p) { if (!has_type(jv, JSON_BOOL)) return false; *dst_p = jv->u.v_bool; return true; } bool json_value_as_int(struct JsonValue *jv, int64_t *dst_p) { if (!has_type(jv, JSON_INT)) return false; *dst_p = jv->u.v_int; return true; } bool json_value_as_float(struct JsonValue *jv, double *dst_p) { if (!has_type(jv, JSON_FLOAT)) { if (has_type(jv, JSON_INT)) { *dst_p = jv->u.v_int; return true; } return false; } *dst_p = jv->u.v_float; return true; } bool json_value_as_string(struct JsonValue *jv, const char **dst_p, size_t *size_p) { if (!has_type(jv, JSON_STRING)) return false; *dst_p = get_cstring(jv); if (size_p) *size_p = jv->u.v_size; return true; } /* * Load value from dict. 
*/ static int dict_getter(struct JsonValue *dict, const char *key, unsigned int klen, struct JsonValue **val_p, enum JsonValueType req_type, bool req_value) { struct JsonValue *val, *kjv; struct CBTree *tree; tree = get_dict_tree(dict); if (!tree) return false; kjv = cbtree_lookup(tree, key, klen); if (!kjv) { if (req_value) return false; *val_p = NULL; return true; } val = get_next(kjv); if (!req_value && json_value_is_null(val)) { *val_p = NULL; return true; } if (!has_type(val, req_type)) return false; *val_p = val; return true; } bool json_dict_get_value(struct JsonValue *dict, const char *key, struct JsonValue **val_p) { struct CBTree *tree; struct JsonValue *kjv; size_t klen; tree = get_dict_tree(dict); if (!tree) return false; klen = strlen(key); kjv = cbtree_lookup(tree, key, klen); if (!kjv) return false; *val_p = get_next(kjv); return true; } bool json_dict_is_null(struct JsonValue *dict, const char *key) { struct JsonValue *val; if (!json_dict_get_value(dict, key, &val)) return true; return has_type(val, JSON_NULL); } bool json_dict_get_bool(struct JsonValue *dict, const char *key, bool *dst_p) { struct JsonValue *val; if (!dict_getter(dict, key, strlen(key), &val, JSON_BOOL, true)) return false; return json_value_as_bool(val, dst_p); } bool json_dict_get_int(struct JsonValue *dict, const char *key, int64_t *dst_p) { struct JsonValue *val; if (!dict_getter(dict, key, strlen(key), &val, JSON_INT, true)) return false; return json_value_as_int(val, dst_p); } bool json_dict_get_float(struct JsonValue *dict, const char *key, double *dst_p) { struct JsonValue *val; if (!dict_getter(dict, key, strlen(key), &val, JSON_FLOAT, true)) return false; return json_value_as_float(val, dst_p); } bool json_dict_get_string(struct JsonValue *dict, const char *key, const char **dst_p, size_t *len_p) { struct JsonValue *val; if (!dict_getter(dict, key, strlen(key), &val, JSON_STRING, true)) return false; return json_value_as_string(val, dst_p, len_p); } bool json_dict_get_list(struct JsonValue *dict, const char *key, struct JsonValue **dst_p) { return dict_getter(dict, key, strlen(key), dst_p, JSON_LIST, true); } bool json_dict_get_dict(struct JsonValue *dict, const char *key, struct JsonValue **dst_p) { return dict_getter(dict, key, strlen(key), dst_p, JSON_DICT, true); } /* * Load optional dict element. 
*/ bool json_dict_get_opt_bool(struct JsonValue *dict, const char *key, bool *dst_p) { struct JsonValue *val; if (!dict_getter(dict, key, strlen(key), &val, JSON_BOOL, false)) return false; return !val || json_value_as_bool(val, dst_p); } bool json_dict_get_opt_int(struct JsonValue *dict, const char *key, int64_t *dst_p) { struct JsonValue *val; if (!dict_getter(dict, key, strlen(key), &val, JSON_INT, false)) return false; return !val || json_value_as_int(val, dst_p); } bool json_dict_get_opt_float(struct JsonValue *dict, const char *key, double *dst_p) { struct JsonValue *val; if (!dict_getter(dict, key, strlen(key), &val, JSON_FLOAT, false)) return false; return !val || json_value_as_float(val, dst_p); } bool json_dict_get_opt_string(struct JsonValue *dict, const char *key, const char **dst_p, size_t *len_p) { struct JsonValue *val; if (!dict_getter(dict, key, strlen(key), &val, JSON_STRING, false)) return false; return !val || json_value_as_string(val, dst_p, len_p); } bool json_dict_get_opt_list(struct JsonValue *dict, const char *key, struct JsonValue **dst_p) { struct JsonValue *val; if (!dict_getter(dict, key, strlen(key), &val, JSON_LIST, false)) return false; if (val) *dst_p = val; return true; } bool json_dict_get_opt_dict(struct JsonValue *dict, const char *key, struct JsonValue **dst_p) { struct JsonValue *val; if (!dict_getter(dict, key, strlen(key), &val, JSON_DICT, false)) return false; if (val) *dst_p = val; return true; } /* * Load value from list. */ bool json_list_get_value(struct JsonValue *list, size_t index, struct JsonValue **val_p) { struct JsonValue *val; struct ValueList *vlist; size_t i; vlist = get_list_vlist(list); if (!vlist) return false; if (index >= list->u.v_size) return false; if (!vlist->array && list->u.v_size > 10) prepare_array(list); /* direct fetch */ if (vlist->array) { *val_p = vlist->array[index]; return true; } /* walk */ val = vlist->first; for (i = 0; val; i++) { if (i == index) { *val_p = val; return true; } val = get_next(val); } return false; } bool json_list_is_null(struct JsonValue *list, size_t n) { struct JsonValue *jv; if (!json_list_get_value(list, n, &jv)) return true; return has_type(jv, JSON_NULL); } bool json_list_get_bool(struct JsonValue *list, size_t index, bool *val_p) { struct JsonValue *jv; if (!json_list_get_value(list, index, &jv)) return false; return json_value_as_bool(jv, val_p); } bool json_list_get_int(struct JsonValue *list, size_t index, int64_t *val_p) { struct JsonValue *jv; if (!json_list_get_value(list, index, &jv)) return false; return json_value_as_int(jv, val_p); } bool json_list_get_float(struct JsonValue *list, size_t index, double *val_p) { struct JsonValue *jv; if (!json_list_get_value(list, index, &jv)) return false; return json_value_as_float(jv, val_p); } bool json_list_get_string(struct JsonValue *list, size_t index, const char **val_p, size_t *len_p) { struct JsonValue *jv; if (!json_list_get_value(list, index, &jv)) return false; return json_value_as_string(jv, val_p, len_p); } bool json_list_get_list(struct JsonValue *list, size_t index, struct JsonValue **val_p) { struct JsonValue *jv; if (!json_list_get_value(list, index, &jv)) return false; if (!has_type(jv, JSON_LIST)) return false; *val_p = jv; return true; } bool json_list_get_dict(struct JsonValue *list, size_t index, struct JsonValue **val_p) { struct JsonValue *jv; if (!json_list_get_value(list, index, &jv)) return false; if (!has_type(jv, JSON_DICT)) return false; *val_p = jv; return true; } /* * Iterate over list and dict values. 
*/ struct DictIterState { json_dict_iter_callback_f cb_func; void *cb_arg; }; static bool dict_iter_helper(void *arg, void *jv) { struct DictIterState *state = arg; struct JsonValue *key = jv; struct JsonValue *val = get_next(key); return state->cb_func(state->cb_arg, key, val); } bool json_dict_iter(struct JsonValue *dict, json_dict_iter_callback_f cb_func, void *cb_arg) { struct DictIterState state; struct CBTree *tree; tree = get_dict_tree(dict); if (!tree) return false; state.cb_func = cb_func; state.cb_arg = cb_arg; return cbtree_walk(tree, dict_iter_helper, &state); } bool json_list_iter(struct JsonValue *list, json_list_iter_callback_f cb_func, void *cb_arg) { struct JsonValue *elem; struct ValueList *vlist; vlist = get_list_vlist(list); if (!vlist) return false; for (elem = vlist->first; elem; elem = get_next(elem)) { if (!cb_func(cb_arg, elem)) return false; } return true; } /* * Create new values. */ struct JsonValue *json_new_null(struct JsonContext *ctx) { return mk_value(ctx, JSON_NULL, 0, false); } struct JsonValue *json_new_bool(struct JsonContext *ctx, bool val) { struct JsonValue *jv; jv = mk_value(ctx, JSON_BOOL, 0, false); if (jv) jv->u.v_bool = val; return jv; } struct JsonValue *json_new_int(struct JsonContext *ctx, int64_t val) { struct JsonValue *jv; if (val < JSON_MININT || val > JSON_MAXINT) { errno = ERANGE; return NULL; } jv = mk_value(ctx, JSON_INT, 0, false); if (jv) jv->u.v_int = val; return jv; } struct JsonValue *json_new_float(struct JsonContext *ctx, double val) { struct JsonValue *jv; /* check if value survives JSON roundtrip */ if (!isfinite(val)) return NULL; jv = mk_value(ctx, JSON_FLOAT, 0, false); if (jv) jv->u.v_float = val; return jv; } struct JsonValue *json_new_string(struct JsonContext *ctx, const char *val) { struct JsonValue *jv; size_t len; len = strlen(val); if (!utf8_validate_string(val, val + len)) return NULL; jv = mk_value(ctx, JSON_STRING, len + 1, false); if (jv) { memcpy(get_cstring(jv), val, len + 1); jv->u.v_size = len; } return jv; } struct JsonValue *json_new_list(struct JsonContext *ctx) { return mk_value(ctx, JSON_LIST, LIST_EXTRA, false); } struct JsonValue *json_new_dict(struct JsonContext *ctx) { return mk_value(ctx, JSON_DICT, DICT_EXTRA, false); } /* * Add to containers */ bool json_list_append(struct JsonValue *list, struct JsonValue *val) { if (!val) return false; if (!has_type(list, JSON_LIST)) return false; if (!is_unattached(val)) return false; set_parent(val, list); set_next(val, NULL); real_list_append(list, val); return true; } bool json_list_append_null(struct JsonValue *list) { struct JsonValue *v; v = json_new_null(get_context(list)); return json_list_append(list, v); } bool json_list_append_bool(struct JsonValue *list, bool val) { struct JsonValue *v; v = json_new_bool(get_context(list), val); return json_list_append(list, v); } bool json_list_append_int(struct JsonValue *list, int64_t val) { struct JsonValue *v; v = json_new_int(get_context(list), val); return json_list_append(list, v); } bool json_list_append_float(struct JsonValue *list, double val) { struct JsonValue *v; v = json_new_float(get_context(list), val); return json_list_append(list, v); } bool json_list_append_string(struct JsonValue *list, const char *val) { struct JsonValue *v; v = json_new_string(get_context(list), val); return json_list_append(list, v); } bool json_dict_put(struct JsonValue *dict, const char *key, struct JsonValue *val) { struct JsonValue *kjv; struct JsonContainer *c; if (!key || !val) return false; if (!has_type(dict,
JSON_DICT)) return false; if (!is_unattached(val)) return false; c = get_container(dict); kjv = json_new_string(c->c_ctx, key); if (!kjv) return false; if (!real_dict_add_key(c->c_ctx, dict, kjv)) return false; set_next(kjv, val); set_next(val, NULL); set_parent(val, dict); return true; } bool json_dict_put_null(struct JsonValue *dict, const char *key) { struct JsonValue *v; v = json_new_null(get_context(dict)); return json_dict_put(dict, key, v); } bool json_dict_put_bool(struct JsonValue *dict, const char *key, bool val) { struct JsonValue *v; v = json_new_bool(get_context(dict), val); return json_dict_put(dict, key, v); } bool json_dict_put_int(struct JsonValue *dict, const char *key, int64_t val) { struct JsonValue *v; v = json_new_int(get_context(dict), val); return json_dict_put(dict, key, v); } bool json_dict_put_float(struct JsonValue *dict, const char *key, double val) { struct JsonValue *v; v = json_new_float(get_context(dict), val); return json_dict_put(dict, key, v); } bool json_dict_put_string(struct JsonValue *dict, const char *key, const char *val) { struct JsonValue *v; v = json_new_string(get_context(dict), val); return json_dict_put(dict, key, v); } /* * Main context management */ struct JsonContext *json_new_context(const void *cx, size_t initial_mem) { struct JsonContext *ctx; CxMem *pool; pool = cx_new_pool(cx, initial_mem, 8); if (!pool) return NULL; ctx = cx_alloc0(pool, sizeof(*ctx)); if (!ctx) { cx_destroy(pool); return NULL; } ctx->pool = pool; return ctx; } void json_free_context(struct JsonContext *ctx) { if (ctx) { CxMem *pool = ctx->pool; memset(ctx, 0, sizeof(*ctx)); cx_destroy(pool); } } const char *json_strerror(struct JsonContext *ctx) { return ctx->lasterr; } void json_set_options(struct JsonContext *ctx, unsigned int options) { ctx->options = options; }
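The file above is self-contained apart from the usual cbtree/mbuf/utf8 helpers it calls. A minimal usage sketch of its public API (json_new_context, json_parse, json_dict_get_int, json_render, json_free_context) follows; the header names and the MBuf helpers mbuf_init_dynamic, mbuf_data, mbuf_written and mbuf_free are assumptions in the style of the surrounding library, not confirmed by this file:

#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <usual/json.h>   /* assumed header name for the API above */
#include <usual/mbuf.h>   /* assumed header name for struct MBuf */

int main(void)
{
	const char *js = "{\"x\": 42, \"tags\": [\"a\", \"b\"]}";
	struct JsonContext *ctx;
	struct JsonValue *top;
	struct MBuf buf;
	int64_t x;

	ctx = json_new_context(NULL, 1024);	/* NULL parent allocator is an assumption */
	if (!ctx)
		return 1;
	top = json_parse(ctx, js, strlen(js));
	if (!top) {
		fprintf(stderr, "parse failed: %s\n", json_strerror(ctx));
		json_free_context(ctx);
		return 1;
	}
	if (json_dict_get_int(top, "x", &x))
		printf("x = %" PRIi64 "\n", x);
	mbuf_init_dynamic(&buf);		/* assumed MBuf initializer */
	if (json_render(&buf, top))
		printf("%.*s\n", (int)mbuf_written(&buf), (const char *)mbuf_data(&buf));
	mbuf_free(&buf);			/* assumed MBuf cleanup */
	json_free_context(ctx);
	return 0;
}

Note that all values are pool-allocated through the context, so freeing the context releases the whole parse tree; no per-value cleanup is needed.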
Evaluate (6 - (-6 + 5)) + 14 + 0 + -2.
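Worked evaluation of the expression above: $(6 - (-6 + 5)) + 14 + 0 + (-2) = (6 - (-1)) + 14 - 2 = 7 + 12 = 19$.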
/* * Generated by class-dump 3.3.4 (64 bit). * * class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2011 by Steve Nygard. */ #import "NSObject.h" @class NSString, NSURL; // Not exported @interface _GEORegionalResourceDownload : NSObject { NSString *_name; long long _type; NSURL *_url; NSString *_destinationPath; NSString *_expectedChecksum; } @property(copy, nonatomic) NSString *expectedChecksum; // @synthesize expectedChecksum=_expectedChecksum; @property(copy, nonatomic) NSString *destinationPath; // @synthesize destinationPath=_destinationPath; @property(copy, nonatomic) NSURL *url; // @synthesize url=_url; @property(nonatomic) long long type; // @synthesize type=_type; @property(copy, nonatomic) NSString *name; // @synthesize name=_name; - (void)dealloc; @end
-10 - (7 - -2 - (54 + -69))
What is the value of (-7 - -1) + (-5 - -15) + -10 + 1?
Evaluate -9 + (7 - 15) + -6 + 5.
(-4 - -8) + -12 - (-60 - -50)
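Worked evaluations of the four expressions above:

$-10 - (7 - (-2) - (54 + (-69))) = -10 - (7 + 2 + 15) = -10 - 24 = -34$

$(-7 - (-1)) + (-5 - (-15)) + (-10) + 1 = -6 + 10 - 10 + 1 = -5$

$-9 + (7 - 15) + (-6) + 5 = -9 - 8 - 6 + 5 = -18$

$(-4 - (-8)) + (-12) - (-60 - (-50)) = 4 - 12 + 10 = 2$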
#[derive(Debug)] pub struct Rectangle { length: u32, width: u32, } impl Rectangle { pub fn can_hold(&self, other: &Rectangle) -> bool { self.length > other.length && self.width > other.width } } #[cfg(test)] mod tests { use super::*; #[test] fn larger_can_hold_smaller() { let larger = Rectangle { length: 8, width: 7, }; let smaller = Rectangle { length: 5, width: 1, }; assert!(larger.can_hold(&smaller)); } #[test] fn smaller_can_hold_larger() { let larger = Rectangle { length: 8, width: 7, }; let smaller = Rectangle { length: 5, width: 1, }; assert!(!smaller.can_hold(&larger)); } }
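A quick illustrative driver for the type above (hypothetical, placed in the same file so the private length and width fields stay accessible):

fn main() {
    let outer = Rectangle { length: 10, width: 8 };
    let inner = Rectangle { length: 3, width: 2 };
    // can_hold is true only when both dimensions are strictly larger.
    println!("{:?} can hold {:?}: {}", outer, inner, outer.can_hold(&inner));
}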
# Copyright (c) 2015, Max Fillinger <[email protected]> # # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH # REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, # INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM # LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR # OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR # PERFORMANCE OF THIS SOFTWARE. # The epub format specification is available at http://idpf.org/epub/201 '''Contains the EpubBuilder class to build epub2.0.1 files with the getebook module.''' import html import re import datetime import getebook import os.path import zipfile __all__ = ['EpubBuilder', 'EpubTOC', 'Author'] def _normalize(name): '''Transform "Firstname [Middlenames] Lastname" into "Lastname, Firstname [Middlenames]".''' split = name.split() if len(split) == 1: return name return split[-1] + ', ' + ' '.join(split[0:-1]) def _make_starttag(tag, attrs): 'Write a starttag.' out = '<' + tag for key in attrs: out += ' {}="{}"'.format(key, html.escape(attrs[key])) out += '>' return out def _make_xml_elem(tag, text, attr = []): 'Write a flat xml element.' out = ' <' + tag for (key, val) in attr: out += ' {}="{}"'.format(key, val) if text: out += '>{}</{}>\n'.format(text, tag) else: out += ' />\n' return out class EpubTOC(getebook.TOC): 'Table of contents.' _head = (( '<?xml version="1.0" encoding="UTF-8"?>\n' '<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1" xml:lang="en-US">\n' ' <head>\n' ' <meta name="dtb:uid" content="{}" />\n' ' <meta name="dtb:depth" content="{}" />\n' ' <meta name="dtb:totalPageCount" content="0" />\n' ' <meta name="dtb:maxPageNumber" content="0" />\n' ' </head>\n' ' <docTitle>\n' ' <text>{}</text>\n' ' </docTitle>\n' )) _doc_author = (( ' <docAuthor>\n' ' <text>{}</text>\n' ' </docAuthor>\n' )) _navp = (( '{0}<navPoint id="nav{1}">\n' '{0} <navLabel>\n' '{0} <text>{2}</text>\n' '{0} </navLabel>\n' '{0} <content src="{3}" />\n' )) def _navp_xml(self, entry, indent_lvl): 'Write xml for an entry and all its subentries.' xml = self._navp.format(' '*indent_lvl, str(entry.no), entry.text, entry.target) for sub in entry.entries: xml += self._navp_xml(sub, indent_lvl+1) xml += ' '*indent_lvl + '</navPoint>\n' return xml def write_xml(self, uid, title, authors): 'Write the xml code for the table of contents.' xml = self._head.format(uid, self.max_depth, title) for aut in authors: xml += self._doc_author.format(aut) xml += ' <navMap>\n' for entry in self.entries: xml += self._navp_xml(entry, 2) xml += ' </navMap>\n</ncx>' return xml class _Fileinfo: 'Information about a component file of an epub.' def __init__(self, name, in_spine = True, guide_title = None, guide_type = None): '''Initialize the object. If the file does not belong in the reading order, in_spine should be set to False. If it should appear in the guide, set guide_title and guide_type.''' self.name = name (self.ident, ext) = os.path.splitext(name) self.in_spine = in_spine self.guide_title = guide_title self.guide_type = guide_type # Infer media-type from file extension ext = ext.lower() if ext in ('.htm', '.html', '.xhtml'): self.media_type = 'application/xhtml+xml' elif ext in ('.png', '.gif', '.jpeg'): self.media_type = 'image/' + ext[1:] elif ext == '.jpg': self.media_type = 'image/jpeg' elif ext == '.css': self.media_type = 'text/css' elif ext == '.ncx': self.media_type = 'application/x-dtbncx+xml' else: raise ValueError('Can\'t infer media-type from extension: %s' % ext) def manifest_entry(self): 'Write the XML element for the manifest.' return _make_xml_elem('item', '', [ ('href', self.name), ('id', self.ident), ('media-type', self.media_type) ]) def spine_entry(self): '''Write the XML element for the spine. (Empty string if in_spine is False.)''' if self.in_spine: return _make_xml_elem('itemref', '', [('idref', self.ident)]) else: return '' def guide_entry(self): '''Write the XML element for the guide. (Empty string if no guide title and type are given.)''' if self.guide_title and self.guide_type: return _make_xml_elem('reference', '', [ ('title', self.guide_title), ('type', self.guide_type), ('href', self.name) ]) else: return '' class _EpubMeta: 'Metadata entry for an epub file.' def __init__(self, tag, text, *args): '''The metadata entry is an XML element. *args is used for supplying the XML element's attributes as (key, value) pairs.''' self.tag = tag self.text = text self.attr = args def write_xml(self): 'Write the XML element.' return _make_xml_elem(self.tag, self.text, self.attr) def __repr__(self): 'Returns the text.' return self.text def __str__(self): 'Returns the text.' return self.text class _EpubDate(_EpubMeta): 'Metadata element for the publication date.' _date_re = re.compile('^([0-9]{4})(-[0-9]{2}(-[0-9]{2})?)?$') def __init__(self, date): '''date must be a string of the form "YYYY[-MM[-DD]]". If it is not of this form, or if the date is invalid, ValueError is raised.''' m = self._date_re.match(date) if not m: raise ValueError('invalid date format') year = int(m.group(1)) mon = day = 1 if m.group(2): mon = int(m.group(2)[1:]) if mon < 1 or mon > 12: raise ValueError('month must be in 1..12') if m.group(3): day = int(m.group(3)[1:]) datetime.date(year, mon, day) # raises ValueError if the date is invalid self.tag = 'dc:date' self.text = date self.attr = () class _EpubLang(_EpubMeta): 'Metadata element for the language of the book.' _lang_re = re.compile('^[a-z]{2}(-[A-Z]{2})?$') def __init__(self, lang): '''lang must be a lower-case two-letter language code, optionally followed by a "-" and a upper-case two-letter country code. (e.g., "en", "en-US", "en-UK", "de", "de-DE", "de-AT")''' if self._lang_re.match(lang): self.tag = 'dc:language' self.text = lang self.attr = () else: raise ValueError('invalid language format') class Author(_EpubMeta): '''To control the file-as and role attribute for the authors, pass an Author object to the EpubBuilder instead of a string. The file-as attribute is a form of the name used for sorting. The role attribute describes how the person was involved in the work. You ONLY need this if an author's name is not of the form "Given-name Family-name", or if you want to specify a role other than author. Otherwise, you can just pass a string.
The value of role should be a MARC relator, e.g., "aut" for author or "edt" for editor. See http://www.loc.gov/marc/relators/ for a full list.''' def __init__(self, name, fileas = None, role = 'aut'): '''Initialize the object. If the argument "fileas" is not given, "Last-name, First-name" is used for the file-as attribute. If the argument "role" is not given, "aut" is used for the role attribute.''' if not fileas: fileas = _normalize(name) self.tag = 'dc:creator' self.text = name self.attr = (('opf:file-as', fileas), ('opf:role', role)) class _OPFfile: '''Class for writing the OPF (Open Packaging Format) file for an epub file. The OPF file contains the metadata, a manifest of all component files in the epub, a "spine" which specifies the reading order and a guide which points to important components of the book such as the title page.''' _opf = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<package version="2.0" xmlns="http://www.idpf.org/2007/opf" unique-identifier="uid_id">\n' ' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n' '{}' ' </metadata>\n' ' <manifest>\n' '{}' ' </manifest>\n' ' <spine toc="toc">\n' '{}' ' </spine>\n' ' <guide>\n' '{}' ' </guide>\n' '</package>\n' ) def __init__(self): 'Initialize.' self.meta = [] self.filelist = [] def write_xml(self): 'Write the XML code for the OPF file.' metadata = '' for elem in self.meta: metadata += elem.write_xml() manif = '' spine = '' guide = '' for finfo in self.filelist: manif += finfo.manifest_entry() spine += finfo.spine_entry() guide += finfo.guide_entry() return self._opf.format(metadata, manif, spine, guide) class EpubBuilder: '''Builds an epub2.0.1 file. Some of the attributes of this class (title, uid, lang) are marked as "mandatory" because they represent metadata that is required by the epub specification. If these attributes are left unset, default values will be used.''' _style_css = ( 'h1, h2, h3, h4, h5, h6 {\n' ' text-align: center;\n' '}\n' 'p {\n' ' text-align: justify;\n' ' margin-top: 0.125em;\n' ' margin-bottom: 0em;\n' ' text-indent: 1.0em;\n' '}\n' '.getebook-tp {\n' ' margin-top: 8em;\n' '}\n' '.getebook-tp-authors {\n' ' font-size: 2em;\n' ' text-align: center;\n' ' margin-bottom: 1em;\n' '}\n' '.getebook-tp-title {\n' ' font-weight: bold;\n' ' font-size: 3em;\n' ' text-align: center;\n' '}\n' '.getebook-tp-sub {\n' ' text-align: center;\n' ' font-weight: normal;\n' ' font-size: 0.8em;\n' ' margin-top: 1em;\n' '}\n' '.getebook-false-h {\n' ' font-weight: bold;\n' ' font-size: 1.5em;\n' '}\n' '.getebook-small-h {\n' ' font-style: normal;\n' ' font-weight: normal;\n' ' font-size: 0.8em;\n' '}\n' ) _container_xml = ( '<?xml version="1.0"?>\n' '<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">\n' ' <rootfiles>\n' ' <rootfile full-path="package.opf" media-type="application/oebps-package+xml"/>\n' ' </rootfiles>\n' '</container>\n' ) _html = ( '<?xml version="1.0" encoding="utf-8"?>\n' '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' '<html xmlns="http://www.w3.org/1999/xhtml">\n' ' <head>\n' ' <title>{}</title>\n' ' <meta http-equiv="content-type" content="application/xhtml+xml; charset=utf-8" />\n' ' <link href="style.css" rel="stylesheet" type="text/css" />\n' ' </head>\n' ' <body>\n{}' ' </body>\n' '</html>\n' ) _finalized = False def __init__(self, epub_file): '''Initialize the EpubBuilder instance. "epub_file" is the filename of the epub to be created.''' self.epub_f = zipfile.ZipFile(epub_file, 'w', zipfile.ZIP_DEFLATED) self.epub_f.writestr('mimetype', 'application/epub+zip') self.epub_f.writestr('META-INF/container.xml', self._container_xml) self.toc = EpubTOC() self.opf = _OPFfile() self.opf.filelist.append(_Fileinfo('toc.ncx', False)) self.opf.filelist.append(_Fileinfo('style.css', False)) self._authors = [] self.opt_meta = {} # Optional metadata (other than authors) self.content = '' self.part_no = 0 self.cont_filename = 'part%03d.html' % self.part_no def __enter__(self): 'Return self for use in with ... as ... statement.' return self def __exit__(self, except_type, except_val, traceback): 'Call finalize() and close the file.' try: self.finalize() finally: # Close again in case an exception happened in finalize() self.epub_f.close() return False @property def uid(self): '''Unique identifier of the ebook. (mandatory) If this property is left unset, a pseudo-random string will be generated which is long enough for collisions with existing ebooks to be extremely unlikely.''' try: return self._uid except AttributeError: import random from string import (ascii_letters, digits) alnum = ascii_letters + digits self.uid = ''.join([random.choice(alnum) for i in range(15)]) return self._uid @uid.setter def uid(self, val): self._uid = _EpubMeta('dc:identifier', str(val), ('id', 'uid_id')) @property def title(self): '''Title of the ebook. (mandatory) If this property is left unset, it defaults to "Untitled".''' try: return self._title except AttributeError: self.title = 'Untitled' return self._title @title.setter def title(self, val): # If val is not a string, raise TypeError now rather than later. self._title = _EpubMeta('dc:title', '' + val) @property def lang(self): '''Language of the ebook. (mandatory) The language must be given as a lower-case two-letter code, optionally followed by a "-" and an upper-case two-letter country code. (e.g., "en", "en-US", "en-UK", "de", "de-DE", "de-AT") If this property is left unset, it defaults to "en".''' try: return self._lang except AttributeError: self.lang = 'en' return self._lang @lang.setter def lang(self, val): self._lang = _EpubLang(val) @property def author(self): '''Name of the author. (optional) If there are multiple authors, pass a list of strings. To control the file-as and role attribute, use Author objects instead of strings; file-as is an alternate form of the name used for sorting. For a description of the role attribute, see the docstring of the Author class.''' if len(self._authors) == 1: return self._authors[0] return tuple([aut for aut in self._authors]) @author.setter def author(self, val): if isinstance(val, Author) or isinstance(val, str): authors = [val] else: authors = val for aut in authors: try: self._authors.append(Author('' + aut)) except TypeError: # aut is not a string, so it should be an Author object self._authors.append(aut) @author.deleter def author(self): self._authors = [] @property def date(self): '''Publication date. (optional) Must be given in "YYYY[-MM[-DD]]" format.''' try: return self.opt_meta['date'] except KeyError: return None @date.setter def date(self, val): self.opt_meta['date'] = _EpubDate(val) @date.deleter def date(self): del self.opt_meta['date'] @property def rights(self): 'Copyright/licensing information. (optional)' try: return self.opt_meta['rights'] except KeyError: return None @rights.setter def rights(self, val): self.opt_meta['rights'] = _EpubMeta('dc:rights', '' + val) @rights.deleter def rights(self): del self.opt_meta['rights'] @property def publisher(self): 'Publisher name. (optional)' try: return self.opt_meta['publisher'] except KeyError: return None @publisher.setter def publisher(self, val): self.opt_meta['publisher'] = _EpubMeta('dc:publisher', '' + val) @publisher.deleter def publisher(self): del self.opt_meta['publisher'] @property def style_css(self): '''CSS stylesheet for the files that are generated by the EpubBuilder instance. Can be overwritten or extended, but not deleted.''' return self._style_css @style_css.setter def style_css(self, val): self._style_css = '' + val def titlepage(self, main_title = None, subtitle = None): '''Create a title page for the ebook. If no main_title is given, the title attribute of the EpubBuilder instance is used.''' tp = '<div class="getebook-tp">\n' if len(self._authors) >= 1: if len(self._authors) == 1: aut_str = str(self._authors[0]) else: aut_str = ', '.join(str(aut) for aut in self._authors[0:-1]) + ', and ' \ + str(self._authors[-1]) tp += '<div class="getebook-tp-authors">%s</div>\n' % aut_str if not main_title: main_title = str(self.title) tp += '<div class="getebook-tp-title">%s' % main_title if subtitle: tp += '<div class="getebook-tp-sub">%s</div>' % subtitle tp += '</div>\n</div>\n' self.opf.filelist.insert(0, _Fileinfo('title.html', guide_title = 'Titlepage', guide_type = 'title-page')) self.epub_f.writestr('title.html', self._html.format(self.title, tp)) def headingpage(self, heading, subtitle = None, toc_text = None): '''Create a page containing only a (large) heading, optionally with a smaller subtitle. If toc_text is not given, it defaults to the heading.''' self.new_part() tag = 'h%d' % min(6, self.toc.depth) self.content += '<div class="getebook-tp">' self.content += '<{} class="getebook-tp-title">{}'.format(tag, heading) if subtitle: self.content += '<div class="getebook-tp-sub">%s</div>' % subtitle self.content += '</%s>\n' % tag if not toc_text: toc_text = heading self.toc.new_entry(toc_text, self.cont_filename) self.new_part() def insert_file(self, name, in_spine = False, guide_title = None, guide_type = None, arcname = None): '''Include an external file into the ebook. By default, it will be added to the archive under its basename; the argument "arcname" can be used to specify a different name.''' if not arcname: arcname = os.path.basename(name) self.opf.filelist.append(_Fileinfo(arcname, in_spine, guide_title, guide_type)) self.epub_f.write(name, arcname) def add_file(self, arcname, str_or_bytes, in_spine = False, guide_title = None, guide_type = None): '''Add the string or bytes instance str_or_bytes to the archive under the name arcname.''' self.opf.filelist.append(_Fileinfo(arcname, in_spine, guide_title, guide_type)) self.epub_f.writestr(arcname, str_or_bytes) def false_heading(self, elem): '''Handle a "false heading", i.e., text that appears in heading tags in the source even though it is not a chapter heading.''' elem.attrs['class'] = 'getebook-false-h' elem.tag = 'p' self.handle_elem(elem) def _heading(self, elem): '''Write a heading.''' # Handle paragraph heading if we have one waiting (see the # par_heading method). We don't use _handle_par_h here because # we merge it with the subsequent proper heading. try: par_h = self.par_h del self.par_h except AttributeError: par_h = None if par_h is None or par_h is elem: # No waiting paragraph heading, or elem is itself the waiting # heading (see _handle_par_h); use its own text for the TOC. toc_text = elem.text else: # There is a waiting paragraph heading, we merge it with the # new heading. toc_text = par_h.text + '. ' + elem.text par_h.tag = 'div' par_h.attrs['class'] = 'getebook-small-h' elem.children.insert(0, par_h) # Set the class attribute value. elem.attrs['class'] = 'getebook-chapter-h' self.toc.new_entry(toc_text, self.cont_filename) # Add heading to the epub. tag = 'h%d' % min(self.toc.depth, 6) self.content += _make_starttag(tag, elem.attrs) for elem in elem.children: self.handle_elem(elem) self.content += '</%s>\n' % tag def par_heading(self, elem): '''Handle a "paragraph heading", i.e., a chapter heading or part of a chapter heading inside paragraph tags. If it is immediately followed by a heading, they will be merged into one.''' self.par_h = elem def _handle_par_h(self): 'Check if there is a waiting paragraph heading and handle it.' try: self._heading(self.par_h) except AttributeError: pass def handle_elem(self, elem): 'Handle html element as supplied by getebook.EbookParser.' try: tag = elem.tag except AttributeError: # elem should be a string is_string = True tag = None else: is_string = False if tag in getebook._headings: self._heading(elem) else: # Handle waiting par_h if necessary (see par_heading) try: self._heading(self.par_h) except AttributeError: pass if is_string: self.content += elem elif tag == 'br': self.content += '<br />\n' elif tag == 'img': self.content += self._handle_image(elem.attrs) + '\n' elif tag == 'a' or tag == 'noscript': # Ignore tag, just write child elements for child in elem.children: self.handle_elem(child) else: self.content += _make_starttag(tag, elem.attrs) for child in elem.children: self.handle_elem(child) self.content += '</%s>' % tag if tag == 'p': self.content += '\n' def _handle_image(self, attrs): 'Returns the alt text of an image tag.' try: return attrs['alt'] except KeyError: return '' def new_part(self): '''Begin a new part of the epub. Write the current html document to the archive and begin a new one.''' # Handle waiting par_h (see par_heading) try: self._heading(self.par_h) except AttributeError: pass if self.content: html = self._html.format(self.title, self.content) self.epub_f.writestr(self.cont_filename, html) self.part_no += 1 self.content = '' self.cont_filename = 'part%03d.html' % self.part_no self.opf.filelist.append(_Fileinfo(self.cont_filename)) def finalize(self): 'Complete and close the epub file.' # Handle waiting par_h (see par_heading) if self._finalized: # Avoid finalizing twice. Otherwise, calling finalize inside # a with-block would lead to an exception when __exit__ # calls finalize again. return try: self._heading(self.par_h) except AttributeError: pass if self.content: html = self._html.format(self.title, self.content) self.epub_f.writestr(self.cont_filename, html) self.opf.meta = [self.uid, self.lang, self.title] + self._authors self.opf.meta += self.opt_meta.values() self.epub_f.writestr('package.opf', self.opf.write_xml()) self.epub_f.writestr('toc.ncx', self.toc.write_xml(self.uid, self.title, self._authors)) self.epub_f.writestr('style.css', self._style_css) self.epub_f.close() self._finalized = True
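A minimal usage sketch of the builder above; the import path is an assumption, and the with-block relies on __exit__ calling finalize() to write package.opf, toc.ncx and style.css:

from getebook.epub import EpubBuilder, Author  # assumed module path

with EpubBuilder('example.epub') as bld:
    bld.title = 'Example Book'
    bld.lang = 'en'
    bld.author = Author('Jane Doe')
    bld.date = '2015-06-01'
    bld.titlepage()
    bld.headingpage('Chapter One')
    bld.content += '<p>Hello, epub world.</p>\n'  # flushed by finalize() on exit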
import hashlib import json import logging import os import subprocess import sys import time from collections import defaultdict from shutil import copy from shutil import copyfile from shutil import copystat from shutil import copytree from tempfile import mkdtemp import boto3 import botocore import yaml from .helpers import archive from .helpers import get_environment_variable_value from .helpers import LambdaContext from .helpers import mkdir from .helpers import read from .helpers import timestamp ARN_PREFIXES = { "cn-north-1": "aws-cn", "cn-northwest-1": "aws-cn", "us-gov-west-1": "aws-us-gov", } log = logging.getLogger(__name__) def load_source(module_name, module_path): """Loads a python module from the path of the corresponding file.""" if sys.version_info[0] == 3 and sys.version_info[1] >= 5: import importlib.util spec = importlib.util.spec_from_file_location(module_name, module_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) elif sys.version_info[0] == 3 and sys.version_info[1] < 5: import importlib.machinery loader = importlib.machinery.SourceFileLoader(module_name, module_path) module = loader.load_module() return module def cleanup_old_versions( src, keep_last_versions, config_file="config.yaml", profile_name=None, ): """Deletes old deployed versions of the function in AWS Lambda. Won't delete $Latest and any aliased version :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param int keep_last_versions: The number of recent versions to keep and not delete """ if keep_last_versions <= 0: print("Won't delete all versions. Please do this manually") else: path_to_config_file = os.path.join(src, config_file) cfg = read_cfg(path_to_config_file, profile_name) profile_name = cfg.get("profile") aws_access_key_id = cfg.get("aws_access_key_id") aws_secret_access_key = cfg.get("aws_secret_access_key") client = get_client( "lambda", profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region"), ) response = client.list_versions_by_function( FunctionName=cfg.get("function_name"), ) versions = response.get("Versions") if len(response.get("Versions")) < keep_last_versions: print("Nothing to delete. (Too few versions published)") else: version_numbers = [ elem.get("Version") for elem in versions[1:-keep_last_versions] ] for version_number in version_numbers: try: client.delete_function( FunctionName=cfg.get("function_name"), Qualifier=version_number, ) except botocore.exceptions.ClientError as e: print(f"Skipping Version {version_number}: {e}") def deploy( src, requirements=None, local_package=None, config_file="config.yaml", profile_name=None, preserve_vpc=False, ): """Deploys a new function to AWS Lambda. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str local_package: The path to a local package which should be included in the deploy as well (and/or is not available on PyPi) """ # Load and parse the config file. path_to_config_file = os.path.join(src, config_file) cfg = read_cfg(path_to_config_file, profile_name) # Copy all the pip dependencies required to run your code into a temporary # folder then add the handler file in the root of this directory. # Zip the contents of this folder into a single file and output to the dist # directory. path_to_zip_file = build( src, config_file=config_file, requirements=requirements, local_package=local_package, ) existing_config = get_function_config(cfg) if existing_config: update_function( cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc ) else: create_function(cfg, path_to_zip_file) def deploy_s3( src, requirements=None, local_package=None, config_file="config.yaml", profile_name=None, preserve_vpc=False, ): """Deploys a new function via AWS S3. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str local_package: The path to a local package which should be included in the deploy as well (and/or is not available on PyPi) """ # Load and parse the config file. path_to_config_file = os.path.join(src, config_file) cfg = read_cfg(path_to_config_file, profile_name) # Copy all the pip dependencies required to run your code into a temporary # folder then add the handler file in the root of this directory. # Zip the contents of this folder into a single file and output to the dist # directory. path_to_zip_file = build( src, config_file=config_file, requirements=requirements, local_package=local_package, ) use_s3 = True s3_file = upload_s3(cfg, path_to_zip_file, use_s3) existing_config = get_function_config(cfg) if existing_config: update_function( cfg, path_to_zip_file, existing_config, use_s3=use_s3, s3_file=s3_file, preserve_vpc=preserve_vpc, ) else: create_function(cfg, path_to_zip_file, use_s3=use_s3, s3_file=s3_file) def upload( src, requirements=None, local_package=None, config_file="config.yaml", profile_name=None, ): """Uploads a new function to AWS S3. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str local_package: The path to a local package which should be included in the deploy as well (and/or is not available on PyPi) """ # Load and parse the config file. path_to_config_file = os.path.join(src, config_file) cfg = read_cfg(path_to_config_file, profile_name) # Copy all the pip dependencies required to run your code into a temporary # folder then add the handler file in the root of this directory. # Zip the contents of this folder into a single file and output to the dist # directory. path_to_zip_file = build( src, config_file=config_file, requirements=requirements, local_package=local_package, ) upload_s3(cfg, path_to_zip_file) def invoke( src, event_file="event.json", config_file="config.yaml", profile_name=None, verbose=False, ): """Simulates a call to your function. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str event_file: An optional argument to override which event file to use. :param bool verbose: Whether to print out verbose details. """ # Load and parse the config file. path_to_config_file = os.path.join(src, config_file) cfg = read_cfg(path_to_config_file, profile_name) # Set AWS_PROFILE environment variable based on `--profile` option. if profile_name: os.environ["AWS_PROFILE"] = profile_name # Load environment variables from the config file into the actual # environment. env_vars = cfg.get("environment_variables") if env_vars: for key, value in env_vars.items(): os.environ[key] = get_environment_variable_value(value) # Load and parse event file. path_to_event_file = os.path.join(src, event_file) event = read(path_to_event_file, loader=json.loads) # Tweak to allow module to import local modules try: sys.path.index(src) except ValueError: sys.path.append(src) handler = cfg.get("handler") # Inspect the handler string (<module>.<function name>) and translate it # into a function we can execute. fn = get_callable_handler_function(src, handler) timeout = cfg.get("timeout") if timeout: context = LambdaContext(cfg.get("function_name"), timeout) else: context = LambdaContext(cfg.get("function_name")) start = time.time() results = fn(event, context) end = time.time() print("{0}".format(results)) if verbose: print( "\nexecution time: {:.8f}s\nfunction execution " "timeout: {:2}s".format(end - start, cfg.get("timeout", 15)) ) def init(src, minimal=False): """Copies template files to a given directory. :param str src: The path to output the template lambda project files. :param bool minimal: Minimal possible template files (excludes event.json). """ templates_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), "project_templates", ) for filename in os.listdir(templates_path): if (minimal and filename == "event.json") or filename.endswith(".pyc"): continue dest_path = os.path.join(templates_path, filename) if not os.path.isdir(dest_path): copy(dest_path, src) def build( src, requirements=None, local_package=None, config_file="config.yaml", profile_name=None, ): """Builds the file bundle. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py). :param str local_package: The path to a local package which should be included in the deploy as well (and/or is not available on PyPi) """ # Load and parse the config file. path_to_config_file = os.path.join(src, config_file) cfg = read_cfg(path_to_config_file, profile_name) # Get the absolute path to the output directory and create it if it doesn't # already exist. dist_directory = cfg.get("dist_directory", "dist") path_to_dist = os.path.join(src, dist_directory) mkdir(path_to_dist) # Combine the name of the Lambda function with the current timestamp to use # for the output filename. function_name = cfg.get("function_name") output_filename = "{0}-{1}.zip".format(timestamp(), function_name) path_to_temp = mkdtemp(prefix="aws-lambda") pip_install_to_target( path_to_temp, requirements=requirements, local_package=local_package, ) # Hack for Zope. if "zope" in os.listdir(path_to_temp): print( "Zope packages detected; fixing Zope package paths to " "make them importable.", ) # Touch. with open(os.path.join(path_to_temp, "zope/__init__.py"), "wb"): pass # Gracefully handle whether ".zip" was included in the filename or not. output_filename = ( "{0}.zip".format(output_filename) if not output_filename.endswith(".zip") else output_filename ) # Allow definition of source code directories we want to build into our # zipped package. build_config = defaultdict(**cfg.get("build", {})) build_source_directories = build_config.get("source_directories", "") build_source_directories = ( build_source_directories if build_source_directories is not None else "" ) source_directories = [ d.strip() for d in build_source_directories.split(",") ] files = [] for filename in os.listdir(src): if os.path.isfile(os.path.join(src, filename)): if filename == ".DS_Store": continue if filename == config_file: continue print("Bundling: %r" % filename) files.append(os.path.join(src, filename)) elif ( os.path.isdir(os.path.join(src, filename)) and filename in source_directories ): print("Bundling directory: %r" % filename) files.append(os.path.join(src, filename)) # "cd" into `temp_path` directory. os.chdir(path_to_temp) for f in files: if os.path.isfile(f): _, filename = os.path.split(f) # Copy handler file into root of the packages folder. copyfile(f, os.path.join(path_to_temp, filename)) copystat(f, os.path.join(path_to_temp, filename)) elif os.path.isdir(f): src_path_length = len(src) + 1 destination_folder = os.path.join( path_to_temp, f[src_path_length:] ) copytree(f, destination_folder) # Zip them together into a single file. # TODO: Delete temp directory created once the archive has been compiled. path_to_zip_file = archive("./", path_to_dist, output_filename) return path_to_zip_file def get_callable_handler_function(src, handler): """Translate a string of the form "module.function" into a callable function. :param str src: The path to your Lambda project containing a valid handler file. :param str handler: A dot delimited string representing the `<module>.<function name>`. """ # "cd" into `src` directory. os.chdir(src) module_name, function_name = handler.split(".") filename = get_handler_filename(handler) path_to_module_file = os.path.join(src, filename) module = load_source(module_name, path_to_module_file) return getattr(module, function_name) def get_handler_filename(handler): """Shortcut to get the filename from the handler string. :param str handler: A dot delimited string representing the `<module>.<function name>`. """ module_name, _ = handler.split(".") return "{0}.py".format(module_name) def _install_packages(path, packages): """Install all packages listed to the target directory. Ignores any package that includes Python itself and python-lambda as well since it's only needed for deploying and not running the code :param str path: Path to copy installed pip packages to. :param list packages: A list of packages to be installed via pip. """ def _filter_blacklist(package): blacklist = ["-i", "#", "Python==", "python-lambda=="] return all(package.startswith(entry) is False for entry in blacklist) filtered_packages = filter(_filter_blacklist, packages) for package in filtered_packages: if package.startswith("-e "): package = package.replace("-e ", "") print("Installing {package}".format(package=package)) subprocess.check_call( [ sys.executable, "-m", "pip", "install", package, "-t", path, "--ignore-installed", ] ) print( "Install directory contents are now: {directory}".format( directory=os.listdir(path) ) ) def pip_install_to_target(path, requirements=None, local_package=None): """For a given active virtualenv, gather all installed pip packages then copy (re-install) them to the path provided. :param str path: Path to copy installed pip packages to. :param str requirements: If set, only the packages in the supplied requirements file are installed. If not set then installs all packages found via pip freeze. :param str local_package: The path to a local package which should be included in the deploy as well (and/or is not available on PyPi) """ packages = [] if not requirements: print("Gathering pip packages") pkgStr = subprocess.check_output( [sys.executable, "-m", "pip", "freeze"] ) packages.extend(pkgStr.decode("utf-8").splitlines()) else: if os.path.exists(requirements): print("Gathering requirement packages") data = read(requirements) packages.extend(data.splitlines()) if not packages: print("No dependency packages installed!") if local_package is not None: if not isinstance(local_package, (list, tuple)): local_package = [local_package] for l_package in local_package: packages.append(l_package) _install_packages(path, packages) def get_role_name(region, account_id, role): """Shortcut to insert the `account_id` and `role` into the iam string.""" prefix = ARN_PREFIXES.get(region, "aws") return "arn:{0}:iam::{1}:role/{2}".format(prefix, account_id, role) def get_account_id( profile_name, aws_access_key_id, aws_secret_access_key, region=None, ): """Query STS for a user's account_id""" client = get_client( "sts", profile_name, aws_access_key_id, aws_secret_access_key, region, ) return client.get_caller_identity().get("Account") def get_client( client, profile_name, aws_access_key_id, aws_secret_access_key, region=None, ): """Shortcut for getting an initialized instance of the boto3 client.""" boto3.setup_default_session( profile_name=profile_name, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, region_name=region, ) return boto3.client(client) def create_function(cfg, path_to_zip_file, use_s3=False, s3_file=None): """Register and upload a function to AWS Lambda.""" print("Creating your new Lambda function") byte_stream = read(path_to_zip_file, binary_file=True) profile_name = cfg.get("profile") aws_access_key_id = cfg.get("aws_access_key_id") aws_secret_access_key = cfg.get("aws_secret_access_key") account_id = get_account_id( profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region",), ) role = get_role_name( cfg.get("region"), account_id, cfg.get("role", "lambda_basic_execution"), ) client = get_client( "lambda", profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region"), ) # Do we prefer development variable over config?
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name") func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get( "function_name" ) print("Creating lambda function with name: {}".format(func_name)) if use_s3: kwargs = { "FunctionName": func_name, "Runtime": cfg.get("runtime", "python2.7"), "Role": role, "Handler": cfg.get("handler"), "Code": { "S3Bucket": "{}".format(buck_name), "S3Key": "{}".format(s3_file), }, "Description": cfg.get("description", ""), "Timeout": cfg.get("timeout", 15), "MemorySize": cfg.get("memory_size", 512), "VpcConfig": { "SubnetIds": cfg.get("subnet_ids", []), "SecurityGroupIds": cfg.get("security_group_ids", []), }, "Publish": True, } else: kwargs = { "FunctionName": func_name, "Runtime": cfg.get("runtime", "python2.7"), "Role": role, "Handler": cfg.get("handler"), "Code": {"ZipFile": byte_stream}, "Description": cfg.get("description", ""), "Timeout": cfg.get("timeout", 15), "MemorySize": cfg.get("memory_size", 512), "VpcConfig": { "SubnetIds": cfg.get("subnet_ids", []), "SecurityGroupIds": cfg.get("security_group_ids", []), }, "Publish": True, } if "tags" in cfg: kwargs.update( Tags={key: str(value) for key, value in cfg.get("tags").items()} ) if "environment_variables" in cfg: kwargs.update( Environment={ "Variables": { key: get_environment_variable_value(value) for key, value in cfg.get("environment_variables").items() }, }, ) client.create_function(**kwargs) concurrency = get_concurrency(cfg) if concurrency > 0: client.put_function_concurrency( FunctionName=func_name, ReservedConcurrentExecutions=concurrency ) def update_function( cfg, path_to_zip_file, existing_cfg, use_s3=False, s3_file=None, preserve_vpc=False, ): """Updates the code of an existing Lambda function""" print("Updating your Lambda function") byte_stream = read(path_to_zip_file, binary_file=True) profile_name = cfg.get("profile") aws_access_key_id = cfg.get("aws_access_key_id") aws_secret_access_key = cfg.get("aws_secret_access_key") account_id = get_account_id( profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region",), ) role = get_role_name( cfg.get("region"), account_id, cfg.get("role", "lambda_basic_execution"), ) client = get_client( "lambda", profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region"), ) # Do we prefer development variable over config? 
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name") if use_s3: client.update_function_code( FunctionName=cfg.get("function_name"), S3Bucket="{}".format(buck_name), S3Key="{}".format(s3_file), Publish=True, ) else: client.update_function_code( FunctionName=cfg.get("function_name"), ZipFile=byte_stream, Publish=True, ) kwargs = { "FunctionName": cfg.get("function_name"), "Role": role, "Runtime": cfg.get("runtime"), "Handler": cfg.get("handler"), "Description": cfg.get("description", ""), "Timeout": cfg.get("timeout", 15), "MemorySize": cfg.get("memory_size", 512), } if preserve_vpc: kwargs["VpcConfig"] = existing_cfg.get("Configuration", {}).get( "VpcConfig" ) if kwargs["VpcConfig"] is None: kwargs["VpcConfig"] = { "SubnetIds": cfg.get("subnet_ids", []), "SecurityGroupIds": cfg.get("security_group_ids", []), } else: del kwargs["VpcConfig"]["VpcId"] else: kwargs["VpcConfig"] = { "SubnetIds": cfg.get("subnet_ids", []), "SecurityGroupIds": cfg.get("security_group_ids", []), } if "environment_variables" in cfg: kwargs.update( Environment={ "Variables": { key: str(get_environment_variable_value(value)) for key, value in cfg.get("environment_variables").items() }, }, ) ret = client.update_function_configuration(**kwargs) concurrency = get_concurrency(cfg) if concurrency > 0: client.put_function_concurrency( FunctionName=cfg.get("function_name"), ReservedConcurrentExecutions=concurrency, ) elif "Concurrency" in existing_cfg: client.delete_function_concurrency( FunctionName=cfg.get("function_name") ) if "tags" in cfg: tags = {key: str(value) for key, value in cfg.get("tags").items()} if tags != existing_cfg.get("Tags"): if existing_cfg.get("Tags"): client.untag_resource( Resource=ret["FunctionArn"], TagKeys=list(existing_cfg["Tags"].keys()), ) client.tag_resource(Resource=ret["FunctionArn"], Tags=tags) def upload_s3(cfg, path_to_zip_file, *use_s3): """Upload a function to AWS S3.""" print("Uploading your new Lambda function") profile_name = cfg.get("profile") aws_access_key_id = cfg.get("aws_access_key_id") aws_secret_access_key = cfg.get("aws_secret_access_key") client = get_client( "s3", profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region"), ) byte_stream = b"" with open(path_to_zip_file, mode="rb") as fh: byte_stream = fh.read() s3_key_prefix = cfg.get("s3_key_prefix", "/dist") checksum = hashlib.new("md5", byte_stream).hexdigest() timestamp = str(time.time()) filename = "{prefix}{checksum}-{ts}.zip".format( prefix=s3_key_prefix, checksum=checksum, ts=timestamp, ) # Do we prefer development variable over config? 
buck_name = os.environ.get("S3_BUCKET_NAME") or cfg.get("bucket_name") func_name = os.environ.get("LAMBDA_FUNCTION_NAME") or cfg.get( "function_name" ) kwargs = { "Bucket": "{}".format(buck_name), "Key": "{}".format(filename), "Body": byte_stream, } client.put_object(**kwargs) print("Finished uploading {} to S3 bucket {}".format(func_name, buck_name)) if use_s3: return filename def get_function_config(cfg): """Check whether a function exists or not and return its config""" function_name = cfg.get("function_name") profile_name = cfg.get("profile") aws_access_key_id = cfg.get("aws_access_key_id") aws_secret_access_key = cfg.get("aws_secret_access_key") client = get_client( "lambda", profile_name, aws_access_key_id, aws_secret_access_key, cfg.get("region"), ) try: return client.get_function(FunctionName=function_name) except client.exceptions.ResourceNotFoundException as e: if "Function not found" in str(e): return False def get_concurrency(cfg): """Return the Reserved Concurrent Executions if present in the config""" concurrency = int(cfg.get("concurrency", 0)) return max(0, concurrency) def read_cfg(path_to_config_file, profile_name): cfg = read(path_to_config_file, loader=yaml.full_load) if profile_name is not None: cfg["profile"] = profile_name elif "AWS_PROFILE" in os.environ: cfg["profile"] = os.environ["AWS_PROFILE"] return cfg
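# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module): how the pieces
# above fit together for a typical deploy. The project path "./myservice" and
# the main() wrapper are hypothetical; invoke() and deploy_s3() are the
# functions defined above, called with their real signatures.
# ---------------------------------------------------------------------------
def main():
    # Test the handler locally first; invoke() loads config.yaml, exports any
    # environment_variables it defines, and calls the <module>.<function>
    # named by the handler string with a stubbed LambdaContext.
    invoke("./myservice", event_file="event.json", verbose=True)

    # Build the zip bundle, upload it to the configured S3 bucket, then
    # create the function or update it in place (keeping its VPC settings).
    deploy_s3(
        "./myservice",
        requirements="requirements.txt",
        config_file="config.yaml",
        preserve_vpc=True,
    )


if __name__ == "__main__":
    main()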
# Gap limit

**Warning! The gap limit setting should not be changed.** Increasing the gap limit can cause a significant drop in performance.

The gap limit determines how many addresses the wallet will generate and probe to determine usage. By default, the gap limit is set to 20. This means two things:

1. When the wallet loads for the first time, it scans for addresses in use and expects the largest gap between used addresses to be at most 20;
2. When the user requests newly generated addresses, only 20 are handed out at a time; the wallet then repeats the operation, which keeps the gaps between addresses from growing larger than 20 (see the sketch at the end of this page).

There are really only two cases in which this value should be changed:

1. If your wallet was created and used heavily before v1.0, it may contain large address gaps. If you restore the wallet from its seed and notice that funds are missing, you can raise the setting to 100 (and then to 1000 if the problem persists) and restart Decrediton. Once the funds are recovered, you can set it back to 20.
2. If you want to be able to generate more than 20 addresses at once, without queueing.
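To make the scan concrete, here is a minimal sketch of the gap-limit logic described above (illustrative Python, not Decrediton's actual code; `derive_address` and `address_was_used` are hypothetical stand-ins for the wallet's key derivation and blockchain lookup):

```python
GAP_LIMIT = 20  # Decrediton's default; raised to 100/1000 only for recovery


def scan_used_addresses(derive_address, address_was_used):
    """Collect used address indices, stopping once GAP_LIMIT consecutive
    addresses show no history -- the wallet assumes nothing beyond that
    gap was ever handed out."""
    used, gap, index = [], 0, 0
    while gap < GAP_LIMIT:
        if address_was_used(derive_address(index)):
            used.append(index)
            gap = 0  # a hit resets the gap counter
        else:
            gap += 1  # one more unused address in the current run
        index += 1
    return used
```

With the default of 20, a pre-v1.0 wallet that used addresses 0-19 and then address 45 would stop scanning at index 39 and never see the funds at 45; raising the limit to 100 widens the window so the scan reaches them, which is exactly the recovery case described above.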
<!doctype html>
<html>
<title>commands</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<link rel="stylesheet" type="text/css" href="../static/style.css">
<body>
<div id="wrapper">

<h1><a href="../api/commands.html">commands</a></h1> <p>npm commands</p>

<h2 id="SYNOPSIS">SYNOPSIS</h2>

<pre><code>npm.commands[&lt;command&gt;](args, callback)</code></pre>

<h2 id="DESCRIPTION">DESCRIPTION</h2>

<p>npm comes with a full set of commands, and each of the commands takes a
similar set of arguments.</p>

<p>In general, all commands on the command object take an <strong>array</strong> of positional
argument <strong>strings</strong>. The last argument to any function is a callback. Some
commands are special and take other optional arguments.</p>

<p>All commands have their own man page. See <code>man npm-&lt;command&gt;</code> for command-line
usage, or <code>man 3 npm-&lt;command&gt;</code> for programmatic usage.</p>

<h2 id="SEE-ALSO">SEE ALSO</h2>

<ul><li><a href="../doc/index.html">index(1)</a></li></ul>
</div>

<p id="footer">commands &mdash; [email protected]</p>
<script>
;(function () {
var wrapper = document.getElementById("wrapper")
var els = Array.prototype.slice.call(wrapper.getElementsByTagName("*"), 0)
  .filter(function (el) {
    return el.parentNode === wrapper
        && el.tagName.match(/H[1-6]/)
        && el.id
  })
var l = 2
  , toc = document.createElement("ul")
toc.innerHTML = els.map(function (el) {
  var i = el.tagName.charAt(1)
    , out = ""
  while (i > l) {
    out += "<ul>"
    l ++
  }
  while (i < l) {
    out += "</ul>"
    l --
  }
  out += "<li><a href='#" + el.id + "'>" +
    ( el.innerText || el.text || el.innerHTML)
    + "</a>"
  return out
}).join("\n")
toc.id = "toc"
document.body.appendChild(toc)
})()
</script>
</body></html>
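// ---------------------------------------------------------------------------
// Usage sketch for the programmatic API documented above (illustrative only;
// assumes a CommonJS environment with the classic npm package available as a
// dependency). npm.load() and npm.commands.install() are the documented entry
// points; the package name "underscore" and the loglevel value are arbitrary
// examples.
// ---------------------------------------------------------------------------
var npm = require("npm")

npm.load({ loglevel: "warn" }, function (er) {
  if (er) throw er

  // Positional arguments go in an array of strings; the last argument is
  // always a callback, as described in the DESCRIPTION section above.
  npm.commands.install(["underscore"], function (er, data) {
    if (er) return console.error("install failed:", er)
    console.log("installed:", data)
  })
})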
/* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. */ #include <sys/types.h> #include <ctype.h> #include <errno.h> #include <devid.h> #include <fcntl.h> #include <libintl.h> #include <stdio.h> #include <stdlib.h> #include <strings.h> #include <dlfcn.h> #include <unistd.h> #include <sys/efi_partition.h> #include <sys/vtoc.h> #include <sys/stat.h> #include <sys/zfs_ioctl.h> #include "zfs_namecheck.h" #include "zfs_prop.h" #include "libzfs_impl.h" #include "zfs_comutil.h" #include "format.h" #include <syslog.h> /*static int read_efi_label(nvlist_t *config, diskaddr_t *sb);*/ #if defined(__i386) || defined(__amd64) #define BOOTCMD "installgrub(1M)" #else #define BOOTCMD "installboot(1M)" #endif #define DISK_ROOT "/dev" #define RDISK_ROOT "/dev" #define BACKUP_SLICE "s2" /* * ==================================================================== * zpool property functions * ==================================================================== */ static int zpool_get_all_props(zpool_handle_t *zhp) { zfs_cmd_t zc = { 0 }; libzfs_handle_t *hdl = zhp->zpool_hdl; (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) return (-1); while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { if (errno == ENOMEM) { if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { zcmd_free_nvlists(&zc); return (-1); } } else { zcmd_free_nvlists(&zc); return (-1); } } if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { zcmd_free_nvlists(&zc); return (-1); } zcmd_free_nvlists(&zc); return (0); } static int zpool_props_refresh(zpool_handle_t *zhp) { nvlist_t *old_props; old_props = zhp->zpool_props; if (zpool_get_all_props(zhp) != 0) return (-1); nvlist_free(old_props); return (0); } static char * zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) { nvlist_t *nv, *nvl; uint64_t ival; char *value; zprop_source_t source; nvl = zhp->zpool_props; if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); source = ival; verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); } else { source = ZPROP_SRC_DEFAULT; if ((value = (char *)zpool_prop_default_string(prop)) == NULL) value = "-"; } if (src) *src = source; return (value); } uint64_t zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) { nvlist_t *nv, *nvl; uint64_t value; zprop_source_t source; if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { /* * zpool_get_all_props() has most likely failed because * the pool is faulted, but if all we need is the top level * vdev's guid then get it from the zhp config nvlist. 
*/ if ((prop == ZPOOL_PROP_GUID) && (nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) == 0)) { return (value); } return (zpool_prop_default_numeric(prop)); } nvl = zhp->zpool_props; if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); source = value; verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); } else { source = ZPROP_SRC_DEFAULT; value = zpool_prop_default_numeric(prop); } if (src) *src = source; return (value); } /* * Map VDEV STATE to printed strings. */ char * zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) { switch (state) { case VDEV_STATE_CLOSED: case VDEV_STATE_OFFLINE: return (gettext("OFFLINE")); case VDEV_STATE_REMOVED: return (gettext("REMOVED")); case VDEV_STATE_CANT_OPEN: if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) return (gettext("FAULTED")); else if (aux == VDEV_AUX_SPLIT_POOL) return (gettext("SPLIT")); else return (gettext("UNAVAIL")); case VDEV_STATE_FAULTED: return (gettext("FAULTED")); case VDEV_STATE_DEGRADED: return (gettext("DEGRADED")); case VDEV_STATE_HEALTHY: return (gettext("ONLINE")); } return (gettext("UNKNOWN")); } /* * Get a zpool property value for 'prop' and return the value in * a pre-allocated buffer. */ int zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, zprop_source_t *srctype) { uint64_t intval; const char *strval; zprop_source_t src = ZPROP_SRC_NONE; nvlist_t *nvroot; vdev_stat_t *vs; uint_t vsc; if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { switch (prop) { case ZPOOL_PROP_NAME: (void) strlcpy(buf, zpool_get_name(zhp), len); break; case ZPOOL_PROP_HEALTH: (void) strlcpy(buf, "FAULTED", len); break; case ZPOOL_PROP_GUID: intval = zpool_get_prop_int(zhp, prop, &src); (void) snprintf(buf, len, "%llu", (long long unsigned int)intval); break; case ZPOOL_PROP_ALTROOT: case ZPOOL_PROP_CACHEFILE: if (zhp->zpool_props != NULL || zpool_get_all_props(zhp) == 0) { (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), len); if (srctype != NULL) *srctype = src; return (0); } /* FALLTHROUGH */ default: (void) strlcpy(buf, "-", len); break; } if (srctype != NULL) *srctype = src; return (0); } if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && prop != ZPOOL_PROP_NAME) return (-1); switch (zpool_prop_get_type(prop)) { case PROP_TYPE_STRING: (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), len); break; case PROP_TYPE_NUMBER: intval = zpool_get_prop_int(zhp, prop, &src); switch (prop) { case ZPOOL_PROP_SIZE: case ZPOOL_PROP_ALLOCATED: case ZPOOL_PROP_FREE: (void) zfs_nicenum(intval, buf, len); break; case ZPOOL_PROP_CAPACITY: (void) snprintf(buf, len, "%llu%%", (u_longlong_t)intval); break; case ZPOOL_PROP_DEDUPRATIO: (void) snprintf(buf, len, "%llu.%02llux", (u_longlong_t)(intval / 100), (u_longlong_t)(intval % 100)); break; case ZPOOL_PROP_HEALTH: verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) == 0); (void) strlcpy(buf, zpool_state_to_name(intval, vs->vs_aux), len); break; default: (void) snprintf(buf, len, "%llu", (u_longlong_t) intval); } break; case PROP_TYPE_INDEX: intval = zpool_get_prop_int(zhp, prop, &src); if (zpool_prop_index_to_string(prop, intval, &strval) != 0) return (-1); (void) strlcpy(buf, strval, len); break; default: abort(); } if (srctype) *srctype = src; 
return (0); } /* * Check if the bootfs name has the same pool name as it is set to. * Assuming bootfs is a valid dataset name. */ static boolean_t bootfs_name_valid(const char *pool, char *bootfs) { int len = strlen(pool); if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) return (B_FALSE); if (strncmp(pool, bootfs, len) == 0 && (bootfs[len] == '/' || bootfs[len] == '\0')) return (B_TRUE); return (B_FALSE); } /* * Inspect the configuration to determine if any of the devices contain * an EFI label. */ /* ZFSFUSE: disabled */ #if 0 static boolean_t pool_uses_efi(nvlist_t *config) { nvlist_t **child; uint_t c, children; if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) return (read_efi_label(config, NULL) >= 0); for (c = 0; c < children; c++) { if (pool_uses_efi(child[c])) return (B_TRUE); } return (B_FALSE); } #endif static boolean_t pool_is_bootable(zpool_handle_t *zhp) { char bootfs[ZPOOL_MAXNAMELEN]; return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-", sizeof (bootfs)) != 0); } /* * Given an nvlist of zpool properties to be set, validate that they are * correct, and parse any numeric properties (index, boolean, etc) if they are * specified as strings. */ static nvlist_t * zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, nvlist_t *props, uint64_t version, boolean_t create_or_import, char *errbuf) { nvpair_t *elem; nvlist_t *retprops; zpool_prop_t prop; char *strval; uint64_t intval; char *slash; struct stat64 statbuf; zpool_handle_t *zhp; nvlist_t *nvroot; if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { (void) no_memory(hdl); return (NULL); } elem = NULL; while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { const char *propname = nvpair_name(elem); /* * Make sure this property is valid and applies to this type. */ if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid property '%s'"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (zpool_prop_readonly(prop)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " "is readonly"), propname); (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); goto error; } if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, &strval, &intval, errbuf) != 0) goto error; /* * Perform additional checking for specific properties. */ switch (prop) { case ZPOOL_PROP_VERSION: if (intval < version || intval > SPA_VERSION) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property '%s' number %d is invalid."), propname, intval); (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); goto error; } break; case ZPOOL_PROP_BOOTFS: if (create_or_import) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property '%s' cannot be set at creation " "or import time"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (version < SPA_VERSION_BOOTFS) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded to support " "'%s' property"), propname); (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); goto error; } /* * bootfs property value has to be a dataset name and * the dataset has to be in the same pool as it sets to. 
*/ if (strval[0] != '\0' && !bootfs_name_valid(poolname, strval)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " "is an invalid name"), strval); (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); goto error; } if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "could not open pool '%s'"), poolname); (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); goto error; } verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); /* * bootfs property cannot be set on a disk which has * been EFI labeled. */ /* ZFSFUSE: disabled */ /*if (pool_uses_efi(nvroot)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property '%s' not supported on " "EFI labeled devices"), propname); (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf); zpool_close(zhp); goto error; }*/ zpool_close(zhp); break; case ZPOOL_PROP_ALTROOT: if (!create_or_import) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property '%s' can only be set during pool " "creation or import"), propname); (void) zfs_error(hdl, EZFS_BADPROP, errbuf); goto error; } if (strval[0] != '/') { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "bad alternate root '%s'"), strval); (void) zfs_error(hdl, EZFS_BADPATH, errbuf); goto error; } break; case ZPOOL_PROP_CACHEFILE: if (strval[0] == '\0') break; if (strcmp(strval, "none") == 0) break; if (strval[0] != '/') { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "property '%s' must be empty, an " "absolute path, or 'none'"), propname); (void) zfs_error(hdl, EZFS_BADPATH, errbuf); goto error; } slash = strrchr(strval, '/'); if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || strcmp(slash, "/..") == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' is not a valid file"), strval); (void) zfs_error(hdl, EZFS_BADPATH, errbuf); goto error; } *slash = '\0'; if (strval[0] != '\0' && (stat64(strval, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' is not a valid directory"), strval); (void) zfs_error(hdl, EZFS_BADPATH, errbuf); goto error; } *slash = '/'; break; } } return (retprops); error: nvlist_free(retprops); return (NULL); } /* * Set zpool property : propname=propval. */ int zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) { zfs_cmd_t zc = { 0 }; int ret = -1; char errbuf[1024]; nvlist_t *nvl = NULL; nvlist_t *realprops; uint64_t version; (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), zhp->zpool_name); if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) return (no_memory(zhp->zpool_hdl)); if (nvlist_add_string(nvl, propname, propval) != 0) { nvlist_free(nvl); return (no_memory(zhp->zpool_hdl)); } version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, zhp->zpool_name, nvl, version, B_FALSE, errbuf)) == NULL) { nvlist_free(nvl); return (-1); } nvlist_free(nvl); nvl = realprops; /* * Execute the corresponding ioctl() to set this property. 
*/ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { nvlist_free(nvl); return (-1); } ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); zcmd_free_nvlists(&zc); nvlist_free(nvl); if (ret) (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); else (void) zpool_props_refresh(zhp); return (ret); } int zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) { libzfs_handle_t *hdl = zhp->zpool_hdl; zprop_list_t *entry; char buf[ZFS_MAXPROPLEN]; if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) return (-1); for (entry = *plp; entry != NULL; entry = entry->pl_next) { if (entry->pl_fixed) continue; if (entry->pl_prop != ZPROP_INVAL && zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), NULL) == 0) { if (strlen(buf) > entry->pl_width) entry->pl_width = strlen(buf); } } return (0); } /* * Don't start the slice at the default block of 34; many storage * devices will use a stripe width of 128k, so start there instead. */ #define NEW_START_BLOCK 256 /* * Validate the given pool name, optionally putting an extended error message in * 'buf'. */ boolean_t zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) { namecheck_err_t why; char what; int ret; ret = pool_namecheck(pool, &why, &what); /* * The rules for reserved pool names were extended at a later point. * But we need to support users with existing pools that may now be * invalid. So we only check for this expanded set of names during a * create (or import), and only in userland. */ if (ret == 0 && !isopen && (strncmp(pool, "mirror", 6) == 0 || strncmp(pool, "raidz", 5) == 0 || strncmp(pool, "spare", 5) == 0 || strcmp(pool, "log") == 0)) { if (hdl != NULL) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "name is reserved")); return (B_FALSE); } if (ret != 0) { if (hdl != NULL) { switch (why) { case NAME_ERR_TOOLONG: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "name is too long")); break; case NAME_ERR_INVALCHAR: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid character " "'%c' in pool name"), what); break; case NAME_ERR_NOLETTER: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "name must begin with a letter")); break; case NAME_ERR_RESERVED: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "name is reserved")); break; case NAME_ERR_DISKLIKE: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool name is reserved")); break; case NAME_ERR_LEADING_SLASH: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "leading slash in name")); break; case NAME_ERR_EMPTY_COMPONENT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "empty component in name")); break; case NAME_ERR_TRAILING_SLASH: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "trailing slash in name")); break; case NAME_ERR_MULTIPLE_AT: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "multiple '@' delimiters in name")); break; } } return (B_FALSE); } return (B_TRUE); } /* * Open a handle to the given pool, even if the pool is currently in the FAULTED * state. */ zpool_handle_t * zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) { zpool_handle_t *zhp; boolean_t missing; /* * Make sure the pool name is valid. 
*/ if (!zpool_name_valid(hdl, B_TRUE, pool)) { (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); return (NULL); } if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) return (NULL); zhp->zpool_hdl = hdl; (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); if (zpool_refresh_stats(zhp, &missing) != 0) { zpool_close(zhp); return (NULL); } if (missing) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); (void) zfs_error_fmt(hdl, EZFS_NOENT, dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); zpool_close(zhp); return (NULL); } return (zhp); } /* * Like the above, but silent on error. Used when iterating over pools (because * the configuration cache may be out of date). */ int zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) { zpool_handle_t *zhp; boolean_t missing; if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) return (-1); zhp->zpool_hdl = hdl; (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); if (zpool_refresh_stats(zhp, &missing) != 0) { zpool_close(zhp); return (-1); } if (missing) { zpool_close(zhp); *ret = NULL; return (0); } *ret = zhp; return (0); } /* * Similar to zpool_open_canfail(), but refuses to open pools in the faulted * state. */ zpool_handle_t * zpool_open(libzfs_handle_t *hdl, const char *pool) { zpool_handle_t *zhp; if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) return (NULL); if (zhp->zpool_state == POOL_STATE_UNAVAIL) { (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); zpool_close(zhp); return (NULL); } return (zhp); } /* * Close the handle. Simply frees the memory associated with the handle. */ void zpool_close(zpool_handle_t *zhp) { if (zhp->zpool_config) nvlist_free(zhp->zpool_config); if (zhp->zpool_old_config) nvlist_free(zhp->zpool_old_config); if (zhp->zpool_props) nvlist_free(zhp->zpool_props); free(zhp); } /* * Return the name of the pool. */ const char * zpool_get_name(zpool_handle_t *zhp) { return (zhp->zpool_name); } /* * Return the state of the pool (ACTIVE or UNAVAILABLE) */ int zpool_get_state(zpool_handle_t *zhp) { return (zhp->zpool_state); } /* * Create the named pool, using the provided vdev list. It is assumed * that the consumer has already validated the contents of the nvlist, so we * don't have to worry about error semantics. 
*/ int zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, nvlist_t *props, nvlist_t *fsprops) { zfs_cmd_t zc = { 0 }; nvlist_t *zc_fsprops = NULL; nvlist_t *zc_props = NULL; char msg[1024]; char *altroot; int ret = -1; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot create '%s'"), pool); if (!zpool_name_valid(hdl, B_FALSE, pool)) return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) return (-1); if (props) { if ((zc_props = zpool_valid_proplist(hdl, pool, props, SPA_VERSION_1, B_TRUE, msg)) == NULL) { goto create_failed; } } if (fsprops) { uint64_t zoned; char *zonestr; zoned = ((nvlist_lookup_string(fsprops, zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && strcmp(zonestr, "on") == 0); if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) { goto create_failed; } if (!zc_props && (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { goto create_failed; } if (nvlist_add_nvlist(zc_props, ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { goto create_failed; } } if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) goto create_failed; (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { zcmd_free_nvlists(&zc); nvlist_free(zc_props); nvlist_free(zc_fsprops); switch (errno) { case EBUSY: /* * This can happen if the user has specified the same * device multiple times. We can't reliably detect this * until we try to add it and see we already have a * label. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "one or more vdevs refer to the same device")); return (zfs_error(hdl, EZFS_BADDEV, msg)); case EOVERFLOW: /* * This occurs when one of the devices is below * SPA_MINDEVSIZE. Unfortunately, we can't detect which * device was the problem device since there's no * reliable way to determine device size from userland. */ { char buf[64]; zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "one or more devices is less than the " "minimum size (%s)"), buf); } return (zfs_error(hdl, EZFS_BADDEV, msg)); case ENOSPC: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "one or more devices is out of space")); return (zfs_error(hdl, EZFS_BADDEV, msg)); case ENOTBLK: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cache device must be a disk or disk slice")); return (zfs_error(hdl, EZFS_BADDEV, msg)); default: return (zpool_standard_error(hdl, errno, msg)); } } /* * If this is an alternate root pool, then we automatically set the * mountpoint of the root dataset to be '/'. */ if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot) == 0) { zfs_handle_t *zhp; verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL); verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), "/") == 0); zfs_close(zhp); } create_failed: zcmd_free_nvlists(&zc); nvlist_free(zc_props); nvlist_free(zc_fsprops); return (ret); } /* * Destroy the given pool. It is up to the caller to ensure that there are no * datasets left in the pool. 
*/ int zpool_destroy(zpool_handle_t *zhp) { zfs_cmd_t zc = { 0 }; zfs_handle_t *zfp = NULL; libzfs_handle_t *hdl = zhp->zpool_hdl; char msg[1024]; if (zhp->zpool_state == POOL_STATE_ACTIVE && (zfp = zfs_open(zhp->zpool_hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) return (-1); (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot destroy '%s'"), zhp->zpool_name); if (errno == EROFS) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "one or more devices is read only")); (void) zfs_error(hdl, EZFS_BADDEV, msg); } else { (void) zpool_standard_error(hdl, errno, msg); } if (zfp) zfs_close(zfp); return (-1); } if (zfp) { remove_mountpoint(zfp); zfs_close(zfp); } return (0); } /* * Add the given vdevs to the pool. The caller must have already performed the * necessary verification to ensure that the vdev specification is well-formed. */ int zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) { zfs_cmd_t zc = { 0 }; int ret; libzfs_handle_t *hdl = zhp->zpool_hdl; char msg[1024]; nvlist_t **spares, **l2cache; uint_t nspares, nl2cache; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot add to '%s'"), zhp->zpool_name); if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < SPA_VERSION_SPARES && nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " "upgraded to add hot spares")); return (zfs_error(hdl, EZFS_BADVERSION, msg)); } #if 0 if (pool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) { uint64_t s; for (s = 0; s < nspares; s++) { char *path; if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH, &path) == 0 && pool_uses_efi(spares[s])) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "device '%s' contains an EFI label and " "cannot be used on root pools."), zpool_vdev_name(hdl, NULL, spares[s], B_FALSE)); return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); } } } #endif if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < SPA_VERSION_L2CACHE && nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " "upgraded to add cache devices")); return (zfs_error(hdl, EZFS_BADVERSION, msg)); } if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) return (-1); (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { switch (errno) { case EBUSY: /* * This can happen if the user has specified the same * device multiple times. We can't reliably detect this * until we try to add it and see we already have a * label. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "one or more vdevs refer to the same device")); (void) zfs_error(hdl, EZFS_BADDEV, msg); break; case EOVERFLOW: /* * This occurrs when one of the devices is below * SPA_MINDEVSIZE. Unfortunately, we can't detect which * device was the problem device since there's no * reliable way to determine device size from userland. 
*/ { char buf[64]; zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "device is less than the minimum " "size (%s)"), buf); } (void) zfs_error(hdl, EZFS_BADDEV, msg); break; case ENOTSUP: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded to add these vdevs")); (void) zfs_error(hdl, EZFS_BADVERSION, msg); break; case EDOM: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "root pool can not have multiple vdevs" " or separate logs")); (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); break; case ENOTBLK: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cache device must be a disk or disk slice")); (void) zfs_error(hdl, EZFS_BADDEV, msg); break; default: (void) zpool_standard_error(hdl, errno, msg); } ret = -1; } else { ret = 0; } zcmd_free_nvlists(&zc); return (ret); } /* * Exports the pool from the system. The caller must ensure that there are no * mounted datasets in the pool. */ int zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce) { zfs_cmd_t zc = { 0 }; char msg[1024]; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot export '%s'"), zhp->zpool_name); (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); zc.zc_cookie = force; zc.zc_guid = hardforce; #define ZFSFUSE_BUSY_SLEEP_FACTOR 500000 // .5 seconds was chosen ater some tuning int retry = 0; int ret; while ((ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc)) == EBUSY && retry++ < 6) { struct timeval timeout; /* Something in the way zfs-fuse works keeps the datasets busy for * longer than expected. * If we try to export/destroy a pool containing a few fs like * pool/fs1/fs2, then it will try to export it much before the umounts * are really finished. * The sleep is a temporary workaround here. * The zfsfuse_destroy function is called after umount has already * returned, so the only solution is to allow a pause here in case the * export fails with EBUSY */ timeout.tv_sec=0; timeout.tv_usec=ZFSFUSE_BUSY_SLEEP_FACTOR; VERIFY(select(0,NULL,NULL,NULL,&timeout)==0); } if (retry>0) syslog(LOG_WARNING, "Pool '%s' was busy, export was tried for %0.1fs (%i attempts) resulting in %s", zhp->zpool_name, (retry*ZFSFUSE_BUSY_SLEEP_FACTOR)/100000.0, retry, strerror(errno)); if (ret != 0) { switch (errno) { case EXDEV: zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, "use '-f' to override the following errors:\n" "'%s' has an active shared spare which could be" " used by other pools once '%s' is exported."), zhp->zpool_name, zhp->zpool_name); return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, msg)); default: return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, msg)); } } return (0); } int zpool_export(zpool_handle_t *zhp, boolean_t force) { return (zpool_export_common(zhp, force, B_FALSE)); } int zpool_export_force(zpool_handle_t *zhp) { return (zpool_export_common(zhp, B_TRUE, B_TRUE)); } static void zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, nvlist_t *rbi) { uint64_t rewindto; int64_t loss = -1; struct tm t; char timestr[128]; if (!hdl->libzfs_printerr || rbi == NULL) return; if (nvlist_lookup_uint64(rbi, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) return; (void) nvlist_lookup_int64(rbi, ZPOOL_CONFIG_REWIND_TIME, &loss); if (localtime_r((time_t *)&rewindto, &t) != NULL && strftime(timestr, 128, "%F", &t) != 0) { if (dryrun) { (void) printf(dgettext(TEXT_DOMAIN, "Would be able to return %s " "to its state as of %s.\n"), name, timestr); } else { (void) printf(dgettext(TEXT_DOMAIN, "Pool %s returned to its state as 
of %s.\n"), name, timestr); } if (loss > 120) { (void) printf(dgettext(TEXT_DOMAIN, "%s approximately " FI64 " "), dryrun ? "Would discard" : "Discarded", (loss + 30) / 60); (void) printf(dgettext(TEXT_DOMAIN, "minutes of transactions.\n")); } else if (loss > 0) { (void) printf(dgettext(TEXT_DOMAIN, "%s approximately " FI64 " "), dryrun ? "Would discard" : "Discarded", loss); (void) printf(dgettext(TEXT_DOMAIN, "seconds of transactions.\n")); } } } void zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, nvlist_t *config) { int64_t loss = -1; uint64_t edata = UINT64_MAX; uint64_t rewindto; struct tm t; char timestr[128]; if (!hdl->libzfs_printerr) return; if (reason >= 0) (void) printf(dgettext(TEXT_DOMAIN, "action: ")); else (void) printf(dgettext(TEXT_DOMAIN, "\t")); /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) goto no_info; (void) nvlist_lookup_int64(config, ZPOOL_CONFIG_REWIND_TIME, &loss); (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_LOAD_DATA_ERRORS, &edata); (void) printf(dgettext(TEXT_DOMAIN, "Recovery is possible, but will result in some data loss.\n")); if (localtime_r((time_t *)&rewindto, &t) != NULL && strftime(timestr, 128, "%F", &t) != 0) { (void) printf(dgettext(TEXT_DOMAIN, "\tReturning the pool to its state as of %s\n" "\tshould correct the problem. "), timestr); } else { (void) printf(dgettext(TEXT_DOMAIN, "\tReverting the pool to an earlier state " "should correct the problem.\n\t")); } if (loss > 120) { (void) printf(dgettext(TEXT_DOMAIN, "Approximately " FI64 " minutes of data\n" "\tmust be discarded, irreversibly. "), (loss + 30) / 60); } else if (loss > 0) { (void) printf(dgettext(TEXT_DOMAIN, "Approximately " FI64 " seconds of data\n" "\tmust be discarded, irreversibly. "), loss); } if (edata != 0 && edata != UINT64_MAX) { if (edata == 1) { (void) printf(dgettext(TEXT_DOMAIN, "After rewind, at least\n" "\tone persistent user-data error will remain. ")); } else { (void) printf(dgettext(TEXT_DOMAIN, "After rewind, several\n" "\tpersistent user-data errors will remain. ")); } } (void) printf(dgettext(TEXT_DOMAIN, "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), reason >= 0 ? "clear" : "import", name); (void) printf(dgettext(TEXT_DOMAIN, "A scrub of the pool\n" "\tis strongly recommended after recovery.\n")); return; no_info: (void) printf(dgettext(TEXT_DOMAIN, "Destroy and re-create the pool from\n\ta backup source.\n")); } /* * zpool_import() is a contracted interface. Should be kept the same * if possible. * * Applications should use zpool_import_props() to import a pool with * new properties value to be set. */ int zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, char *altroot) { nvlist_t *props = NULL; int ret; if (altroot != NULL) { if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { return (zfs_error_fmt(hdl, EZFS_NOMEM, dgettext(TEXT_DOMAIN, "cannot import '%s'"), newname)); } if (nvlist_add_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || nvlist_add_string(props, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { nvlist_free(props); return (zfs_error_fmt(hdl, EZFS_NOMEM, dgettext(TEXT_DOMAIN, "cannot import '%s'"), newname)); } } ret = zpool_import_props(hdl, config, newname, props, B_FALSE); if (props) nvlist_free(props); return (ret); } /* * Import the given pool using the known configuration and a list of * properties to be set. 
The configuration should have come from * zpool_find_import(). The 'newname' parameters control whether the pool * is imported with a different name. */ int zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, nvlist_t *props, boolean_t importfaulted) { zfs_cmd_t zc = { 0 }; zpool_rewind_policy_t policy; nvlist_t *nvi = NULL; char *thename; char *origname; uint64_t returned_size; int ret; char errbuf[1024]; verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, &origname) == 0); (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot import pool '%s'"), origname); if (newname != NULL) { if (!zpool_name_valid(hdl, B_FALSE, newname)) return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, dgettext(TEXT_DOMAIN, "cannot import '%s'"), newname)); thename = (char *)newname; } else { thename = origname; } if (props) { uint64_t version; verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &version) == 0); if ((props = zpool_valid_proplist(hdl, origname, props, version, B_TRUE, errbuf)) == NULL) { return (-1); } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { nvlist_free(props); return (-1); } } (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &zc.zc_guid) == 0); if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { nvlist_free(props); return (-1); } returned_size = zc.zc_nvlist_conf_size + 512; if (zcmd_alloc_dst_nvlist(hdl, &zc, returned_size) != 0) { nvlist_free(props); return (-1); } zc.zc_cookie = (uint64_t)importfaulted; ret = 0; if (zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc) != 0) { char desc[1024]; (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); zpool_get_rewind_policy(config, &policy); /* * Dry-run failed, but we print out what success * looks like if we found a best txg */ if ((policy.zrp_request & ZPOOL_TRY_REWIND) && nvi) { zpool_rewind_exclaim(hdl, newname ? origname : thename, B_TRUE, nvi); nvlist_free(nvi); return (-1); } if (newname == NULL) (void) snprintf(desc, sizeof (desc), dgettext(TEXT_DOMAIN, "cannot import '%s'"), thename); else (void) snprintf(desc, sizeof (desc), dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), origname, thename); switch (errno) { case ENOTSUP: /* * Unsupported version. */ (void) zfs_error(hdl, EZFS_BADVERSION, desc); break; case EINVAL: (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); break; case EROFS: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "one or more devices is read only")); (void) zfs_error(hdl, EZFS_BADDEV, desc); break; default: (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); (void) zpool_standard_error(hdl, errno, desc); zpool_explain_recover(hdl, newname ? origname : thename, -errno, nvi); nvlist_free(nvi); break; } ret = -1; } else { zpool_handle_t *zhp; /* * This should never fail, but play it safe anyway. */ if (zpool_open_silent(hdl, thename, &zhp) != 0) ret = -1; else if (zhp != NULL) zpool_close(zhp); (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); zpool_get_rewind_policy(config, &policy); if (policy.zrp_request & (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { zpool_rewind_exclaim(hdl, newname ? origname : thename, ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nvi); } nvlist_free(nvi); return (0); } zcmd_free_nvlists(&zc); nvlist_free(props); return (ret); } /* * Scan the pool. 
*/ int zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) { zfs_cmd_t zc = { 0 }; char msg[1024]; libzfs_handle_t *hdl = zhp->zpool_hdl; (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); zc.zc_cookie = func; if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || (errno == ENOENT && func != POOL_SCAN_NONE)) return (0); if (func == POOL_SCAN_SCRUB) { (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); } else if (func == POOL_SCAN_NONE) { (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), zc.zc_name); } else { assert(!"unexpected result"); } if (errno == EBUSY) { nvlist_t *nvroot; pool_scan_stat_t *ps = NULL; uint_t psc; verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); (void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); if (ps && ps->pss_func == POOL_SCAN_SCRUB) return (zfs_error(hdl, EZFS_SCRUBBING, msg)); else return (zfs_error(hdl, EZFS_RESILVERING, msg)); } else if (errno == ENOENT) { return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); } else { return (zpool_standard_error(hdl, errno, msg)); } } /* * Find a vdev that matches the search criteria specified. We use the * the nvpair name to determine how we should look for the device. * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL * spare; but FALSE if its an INUSE spare. */ static nvlist_t * vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) { uint_t c, children; nvlist_t **child; nvlist_t *ret; uint64_t is_log; char *srchkey; nvpair_t *pair = nvlist_next_nvpair(search, NULL); /* Nothing to look for */ if (search == NULL || pair == NULL) return (NULL); /* Obtain the key we will use to search */ srchkey = nvpair_name(pair); switch (nvpair_type(pair)) { case DATA_TYPE_UINT64: { uint64_t srchval, theguid, present; verify(nvpair_value_uint64(pair, &srchval) == 0); if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &present) == 0) { /* * If the device has never been present since * import, the only reliable way to match the * vdev is by GUID. */ verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &theguid) == 0); if (theguid == srchval) return (nv); } } break; } case DATA_TYPE_STRING: { char *srchval, *val; verify(nvpair_value_string(pair, &srchval) == 0); if (nvlist_lookup_string(nv, srchkey, &val) != 0) break; /* * Search for the requested value. We special case the search * for ZPOOL_CONFIG_PATH when it's a wholedisk and when * Looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). * Otherwise, all other searches are simple string compares. */ if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && val) { uint64_t wholedisk = 0; (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk); if (wholedisk) { /* * For whole disks, the internal path has 's0', * but the path passed in by the user doesn't. */ if (strlen(srchval) == strlen(val) - 2 && strncmp(srchval, val, strlen(srchval)) == 0) return (nv); break; } } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { char *type, *idx, *end, *p; uint64_t id, vdev_id; /* * Determine our vdev type, keeping in mind * that the srchval is composed of a type and * vdev id pair (i.e. mirror-4). */ if ((type = strdup(srchval)) == NULL) return (NULL); if ((p = strrchr(type, '-')) == NULL) { free(type); break; } idx = p + 1; *p = '\0'; /* * If the types don't match then keep looking. 
*/ if (strncmp(val, type, strlen(val)) != 0) { free(type); break; } verify(strncmp(type, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || strncmp(type, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0); verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &id) == 0); errno = 0; vdev_id = strtoull(idx, &end, 10); free(type); if (errno != 0) return (NULL); /* * Now verify that we have the correct vdev id. */ if (vdev_id == id) return (nv); } /* * Common case */ if (strcmp(srchval, val) == 0) return (nv); break; } default: break; } if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) return (NULL); for (c = 0; c < children; c++) { if ((ret = vdev_to_nvlist_iter(child[c], search, avail_spare, l2cache, NULL)) != NULL) { /* * The 'is_log' value is only set for the toplevel * vdev, not the leaf vdevs. So we always lookup the * log device from the root of the vdev tree (where * 'log' is non-NULL). */ if (log != NULL && nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && is_log) { *log = B_TRUE; } return (ret); } } if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child, &children) == 0) { for (c = 0; c < children; c++) { if ((ret = vdev_to_nvlist_iter(child[c], search, avail_spare, l2cache, NULL)) != NULL) { *avail_spare = B_TRUE; return (ret); } } } if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, &child, &children) == 0) { for (c = 0; c < children; c++) { if ((ret = vdev_to_nvlist_iter(child[c], search, avail_spare, l2cache, NULL)) != NULL) { *l2cache = B_TRUE; return (ret); } } } return (NULL); } /* * Given a physical path (minus the "/devices" prefix), find the * associated vdev. */ nvlist_t * zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) { nvlist_t *search, *nvroot, *ret; verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); *avail_spare = B_FALSE; ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); nvlist_free(search); return (ret); } /* * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 
*/ boolean_t zpool_vdev_is_interior(const char *name) { if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) return (B_TRUE); return (B_FALSE); } nvlist_t * zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) { char buf[MAXPATHLEN]; char *end; nvlist_t *nvroot, *search, *ret; uint64_t guid; verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); guid = strtoull(path, &end, 10); if (guid != 0 && *end == '\0') { verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); } else if (zpool_vdev_is_interior(path)) { verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); } else if (path[0] != '/') { (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path); verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); } else { verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); } verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); *avail_spare = B_FALSE; *l2cache = B_FALSE; if (log != NULL) *log = B_FALSE; ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); nvlist_free(search); return (ret); } static int vdev_online(nvlist_t *nv) { uint64_t ival; if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) return (0); return (1); } /* * Helper function for zpool_get_physpaths(). */ static int vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, size_t *bytes_written) { size_t bytes_left, pos, rsz; char *tmppath; const char *format; if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, &tmppath) != 0) return (EZFS_NODEVICE); pos = *bytes_written; bytes_left = physpath_size - pos; format = (pos == 0) ? "%s" : " %s"; rsz = snprintf(physpath + pos, bytes_left, format, tmppath); *bytes_written += rsz; if (rsz >= bytes_left) { /* if physpath was not copied properly, clear it */ if (bytes_left != 0) { physpath[pos] = 0; } return (EZFS_NOSPC); } return (0); } static int vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, size_t *rsz, boolean_t is_spare) { char *type; int ret; if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) return (EZFS_INVALCONFIG); if (strcmp(type, VDEV_TYPE_DISK) == 0) { /* * An active spare device has ZPOOL_CONFIG_IS_SPARE set. * For a spare vdev, we only want to boot from the active * spare device. */ if (is_spare) { uint64_t spare = 0; (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, &spare); if (!spare) return (EZFS_INVALCONFIG); } if (vdev_online(nv)) { if ((ret = vdev_get_one_physpath(nv, physpath, phypath_size, rsz)) != 0) return (ret); } } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || strcmp(type, VDEV_TYPE_REPLACING) == 0 || (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { nvlist_t **child; uint_t count; int i, ret; if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) return (EZFS_INVALCONFIG); for (i = 0; i < count; i++) { ret = vdev_get_physpaths(child[i], physpath, phypath_size, rsz, is_spare); if (ret == EZFS_NOSPC) return (ret); } } return (EZFS_POOL_INVALARG); } /* * Get phys_path for a root pool config. * Return 0 on success; non-zero on failure. 
*/ static int zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) { size_t rsz; nvlist_t *vdev_root; nvlist_t **child; uint_t count; char *type; rsz = 0; if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &vdev_root) != 0) return (EZFS_INVALCONFIG); if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) return (EZFS_INVALCONFIG); /* * root pool cannot have EFI labeled disks and can only have * a single top-level vdev. */ #if 0 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 || pool_uses_efi(vdev_root)) return (EZFS_POOL_INVALARG); #endif (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, B_FALSE); /* No online devices */ if (rsz == 0) return (EZFS_NODEVICE); return (0); } /* * Get phys_path for a root pool * Return 0 on success; non-zero on failure. */ int zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) { return (zpool_get_config_physpath(zhp->zpool_config, physpath, phypath_size)); } /* * If the device has been dynamically expanded then we need to relabel * the disk to use the new unallocated space. */ static int zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) { char path[MAXPATHLEN]; char errbuf[1024]; int fd, error; int (*_efi_use_whole_disk)(int); if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, "efi_use_whole_disk")) == NULL) return (-1); (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name); if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " "relabel '%s': unable to open device"), name); return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); } /* * It's possible that we might encounter an error if the device * does not have any unallocated space left. If so, we simply * ignore that error and continue on. */ /* zfs-fuse : no efi function here, this should be fixed later if * possible... * error = _efi_use_whole_disk(fd); */ (void) close(fd); /* if (error && error != VT_ENOSPC) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " "relabel '%s': unable to read disk capacity"), name); return (zfs_error(hdl, EZFS_NOCAP, errbuf)); } */ return (0); } /* * Bring the specified vdev online. The 'flags' parameter is a set of the * ZFS_ONLINE_* flags. */ int zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, vdev_state_t *newstate) { zfs_cmd_t zc = { 0 }; char msg[1024]; nvlist_t *tgt; boolean_t avail_spare, l2cache, islog; libzfs_handle_t *hdl = zhp->zpool_hdl; if (flags & ZFS_ONLINE_EXPAND) { (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot expand %s"), path); } else { (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot online %s"), path); } (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, &islog)) == NULL) return (zfs_error(hdl, EZFS_NODEVICE, msg)); verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); if (avail_spare) return (zfs_error(hdl, EZFS_ISSPARE, msg)); if (flags & ZFS_ONLINE_EXPAND || zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { char *pathname = NULL; uint64_t wholedisk = 0; (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk); verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0); /* * XXX - L2ARC 1.0 devices can't support expansion.
*/ if (l2cache) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot expand cache devices")); return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); } if (wholedisk) { pathname += strlen(DISK_ROOT) + 1; (void) zpool_relabel_disk(zhp->zpool_hdl, pathname); } } zc.zc_cookie = VDEV_STATE_ONLINE; zc.zc_obj = flags; if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { if (errno == EINVAL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " "from this pool into a new one. Use '%s' " "instead"), "zpool detach"); return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); } return (zpool_standard_error(hdl, errno, msg)); } *newstate = zc.zc_cookie; return (0); } /* * Take the specified vdev offline */ int zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) { zfs_cmd_t zc = { 0 }; char msg[1024]; nvlist_t *tgt; boolean_t avail_spare, l2cache; libzfs_handle_t *hdl = zhp->zpool_hdl; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot offline %s"), path); (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, NULL)) == NULL) return (zfs_error(hdl, EZFS_NODEVICE, msg)); verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); if (avail_spare) return (zfs_error(hdl, EZFS_ISSPARE, msg)); zc.zc_cookie = VDEV_STATE_OFFLINE; zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) return (0); switch (errno) { case EBUSY: /* * There are no other replicas of this device. */ return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); case EEXIST: /* * The log device has unplayed logs */ return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); default: return (zpool_standard_error(hdl, errno, msg)); } } /* * Mark the given vdev faulted. */ int zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) { zfs_cmd_t zc = { 0 }; char msg[1024]; libzfs_handle_t *hdl = zhp->zpool_hdl; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t) guid); (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); zc.zc_guid = guid; zc.zc_cookie = VDEV_STATE_FAULTED; zc.zc_obj = aux; if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) return (0); switch (errno) { case EBUSY: /* * There are no other replicas of this device. */ return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); default: return (zpool_standard_error(hdl, errno, msg)); } } /* * Mark the given vdev degraded. */ int zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) { zfs_cmd_t zc = { 0 }; char msg[1024]; libzfs_handle_t *hdl = zhp->zpool_hdl; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t) guid); (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); zc.zc_guid = guid; zc.zc_cookie = VDEV_STATE_DEGRADED; zc.zc_obj = aux; if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) return (0); return (zpool_standard_error(hdl, errno, msg)); } /* * Returns TRUE if the given nvlist is a vdev that was originally swapped in as * a hot spare. 
*/ static boolean_t is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) { nvlist_t **child; uint_t c, children; char *type; if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, &children) == 0) { verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, &type) == 0); if (strcmp(type, VDEV_TYPE_SPARE) == 0 && children == 2 && child[which] == tgt) return (B_TRUE); for (c = 0; c < children; c++) if (is_replacing_spare(child[c], tgt, which)) return (B_TRUE); } return (B_FALSE); } /* * Attach new_disk (fully described by nvroot) to old_disk. * If 'replacing' is specified, the new disk will replace the old one. */ int zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) { zfs_cmd_t zc = { 0 }; char msg[1024]; int ret; nvlist_t *tgt; boolean_t avail_spare, l2cache, islog; uint64_t val; char *path, *newname; nvlist_t **child; uint_t children; nvlist_t *config_root; libzfs_handle_t *hdl = zhp->zpool_hdl; boolean_t rootpool = pool_is_bootable(zhp); if (replacing) (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot replace %s with %s"), old_disk, new_disk); else (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot attach %s to %s"), new_disk, old_disk); /* * If this is a root pool, make sure that we're not attaching an * EFI labeled device. */ #if 0 if (rootpool && pool_uses_efi(nvroot)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "EFI labeled devices are not supported on root pools.")); return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); } #endif (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, &islog)) == 0) return (zfs_error(hdl, EZFS_NODEVICE, msg)); if (avail_spare) return (zfs_error(hdl, EZFS_ISSPARE, msg)); if (l2cache) return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); zc.zc_cookie = replacing; if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child, &children) != 0 || children != 1) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "new device must be a single disk")); return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); } verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) return (-1); /* * If the target is a hot spare that has been swapped in, we can only * replace it with another hot spare. */ if (replacing && nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, NULL) == NULL || !avail_spare) && is_replacing_spare(config_root, tgt, 1)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "can only be replaced by another hot spare")); free(newname); return (zfs_error(hdl, EZFS_BADTARGET, msg)); } /* * If we are attempting to replace a spare, it cannot be applied to an * already spared device.
*/ if (replacing && nvlist_lookup_string(child[0], ZPOOL_CONFIG_PATH, &path) == 0 && zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, NULL) != NULL && avail_spare && is_replacing_spare(config_root, tgt, 0)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "device has already been replaced with a spare")); free(newname); return (zfs_error(hdl, EZFS_BADTARGET, msg)); } free(newname); if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) return (-1); ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_VDEV_ATTACH, &zc); zcmd_free_nvlists(&zc); if (ret == 0) { if (rootpool) { /* * XXX - This should be removed once we can * automatically install the bootblocks on the * newly attached disk. */ (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Please " "be sure to invoke %s to make '%s' bootable.\n"), BOOTCMD, new_disk); /* * XXX need a better way to prevent user from * booting up a half-baked vdev. */ (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " "sure to wait until resilver is done " "before rebooting.\n")); } return (0); } switch (errno) { case ENOTSUP: /* * Can't attach to or replace this type of vdev. */ if (replacing) { if (islog) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot replace a log with a spare")); else zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot replace a replacing device")); } else { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "can only attach to mirrors and top-level " "disks")); } (void) zfs_error(hdl, EZFS_BADTARGET, msg); break; case EINVAL: /* * The new device must be a single disk. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "new device must be a single disk")); (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); break; case EBUSY: zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), new_disk); (void) zfs_error(hdl, EZFS_BADDEV, msg); break; case EOVERFLOW: /* * The new device is too small. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "device is too small")); (void) zfs_error(hdl, EZFS_BADDEV, msg); break; case EDOM: /* * The new device has a different alignment requirement. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "devices have different sector alignment")); (void) zfs_error(hdl, EZFS_BADDEV, msg); break; case ENAMETOOLONG: /* * The resulting top-level vdev spec won't fit in the label. */ (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); break; default: (void) zpool_standard_error(hdl, errno, msg); } return (-1); } /* * Detach the specified device. */ int zpool_vdev_detach(zpool_handle_t *zhp, const char *path) { zfs_cmd_t zc = { 0 }; char msg[1024]; nvlist_t *tgt; boolean_t avail_spare, l2cache; libzfs_handle_t *hdl = zhp->zpool_hdl; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot detach %s"), path); (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, NULL)) == 0) return (zfs_error(hdl, EZFS_NODEVICE, msg)); if (avail_spare) return (zfs_error(hdl, EZFS_ISSPARE, msg)); if (l2cache) return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) return (0); switch (errno) { case ENOTSUP: /* * Can't detach from this type of vdev. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " "applicable to mirror and replacing vdevs")); (void) zfs_error(zhp->zpool_hdl, EZFS_BADTARGET, msg); break; case EBUSY: /* * There are no other replicas of this device. 
*/ (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); break; default: (void) zpool_standard_error(hdl, errno, msg); } return (-1); } /* * Find a mirror vdev in the source nvlist. * * The mchild array contains a list of disks in one of the top-level mirrors * of the source pool. The schild array contains a list of disks that the * user specified on the command line. We loop over the mchild array to * see if any entry in the schild array matches. * * If a disk in the mchild array is found in the schild array, we return * the index of that entry. Otherwise we return -1. */ static int find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, nvlist_t **schild, uint_t schildren) { uint_t mc; for (mc = 0; mc < mchildren; mc++) { uint_t sc; char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, mchild[mc], B_FALSE); for (sc = 0; sc < schildren; sc++) { char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, schild[sc], B_FALSE); boolean_t result = (strcmp(mpath, spath) == 0); free(spath); if (result) { free(mpath); return (mc); } } free(mpath); } return (-1); } /* * Split a mirror pool. If newroot points to null, then a new nvlist * is generated and it is the responsibility of the caller to free it. */ int zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, nvlist_t *props, splitflags_t flags) { zfs_cmd_t zc = { 0 }; char msg[1024]; nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; nvlist_t **varray = NULL, *zc_props = NULL; uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; libzfs_handle_t *hdl = zhp->zpool_hdl; uint64_t vers; boolean_t freelist = B_FALSE, memory_err = B_TRUE; int retval = 0; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); if (!zpool_name_valid(hdl, B_FALSE, newname)) return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); if ((config = zpool_get_config(zhp, NULL)) == NULL) { (void) fprintf(stderr, gettext("Internal error: unable to " "retrieve pool configuration\n")); return (-1); } verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) == 0); verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); if (props) { if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, props, vers, B_TRUE, msg)) == NULL) return (-1); } if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Source pool is missing vdev tree")); if (zc_props) nvlist_free(zc_props); return (-1); } varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); vcount = 0; if (*newroot == NULL || nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, &newchild, &newchildren) != 0) newchildren = 0; for (c = 0; c < children; c++) { uint64_t is_log = B_FALSE, is_hole = B_FALSE; char *type; nvlist_t **mchild, *vdev; uint_t mchildren; int entry; /* * Unlike cache & spares, slogs are stored in the * ZPOOL_CONFIG_CHILDREN array. We filter them out here. */ (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, &is_log); (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, &is_hole); if (is_log || is_hole) { /* * Create a hole vdev and put it in the config. 
*/ if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) goto out; if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) != 0) goto out; if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 1) != 0) goto out; if (lastlog == 0) lastlog = vcount; varray[vcount++] = vdev; continue; } lastlog = 0; verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) == 0); if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Source pool must be composed only of mirrors\n")); retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); goto out; } verify(nvlist_lookup_nvlist_array(child[c], ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); /* find or add an entry for this top-level vdev */ if (newchildren > 0 && (entry = find_vdev_entry(zhp, mchild, mchildren, newchild, newchildren)) >= 0) { /* We found a disk that the user specified. */ vdev = mchild[entry]; ++found; } else { /* User didn't specify a disk for this vdev. */ vdev = mchild[mchildren - 1]; } if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) goto out; } /* did we find every disk the user specified? */ if (found != newchildren) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " "include at most one disk from each mirror")); retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); goto out; } /* Prepare the nvlist for populating. */ if (*newroot == NULL) { if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) goto out; freelist = B_TRUE; if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0) goto out; } else { verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); } /* Add all the children we found */ if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, lastlog == 0 ? vcount : lastlog) != 0) goto out; /* * If we're just doing a dry run, exit now with success. */ if (flags.dryrun) { memory_err = B_FALSE; freelist = B_FALSE; goto out; } /* now build up the config list & call the ioctl */ if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) goto out; if (nvlist_add_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || nvlist_add_string(newconfig, ZPOOL_CONFIG_POOL_NAME, newname) != 0 || nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) goto out; /* * The new pool is automatically part of the namespace unless we * explicitly export it. */ if (!flags.import) zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) goto out; if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) goto out; if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { retval = zpool_standard_error(hdl, errno, msg); goto out; } freelist = B_FALSE; memory_err = B_FALSE; out: if (varray != NULL) { int v; for (v = 0; v < vcount; v++) nvlist_free(varray[v]); free(varray); } zcmd_free_nvlists(&zc); if (zc_props) nvlist_free(zc_props); if (newconfig) nvlist_free(newconfig); if (freelist) { nvlist_free(*newroot); *newroot = NULL; } if (retval != 0) return (retval); if (memory_err) return (no_memory(hdl)); return (0); } /* * Remove the given device. Currently, this is supported only for hot spares * and level 2 cache devices. 
*/ int zpool_vdev_remove(zpool_handle_t *zhp, const char *path) { zfs_cmd_t zc = { 0 }; char msg[1024]; nvlist_t *tgt; boolean_t avail_spare, l2cache, islog; libzfs_handle_t *hdl = zhp->zpool_hdl; uint64_t version; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot remove %s"), path); (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, &islog)) == 0) return (zfs_error(hdl, EZFS_NODEVICE, msg)); /* * XXX - this should just go away. */ if (!avail_spare && !l2cache && !islog) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only inactive hot spares, cache, top-level, " "or log devices can be removed")); return (zfs_error(hdl, EZFS_NODEVICE, msg)); } version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); if (islog && version < SPA_VERSION_HOLES) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be upgraded to support log removal")); return (zfs_error(hdl, EZFS_BADVERSION, msg)); } verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) return (0); return (zpool_standard_error(hdl, errno, msg)); } /* * Clear the errors for the pool, or the particular device if specified. */ int zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) { zfs_cmd_t zc = { 0 }; char msg[1024]; nvlist_t *tgt; zpool_rewind_policy_t policy; boolean_t avail_spare, l2cache; libzfs_handle_t *hdl = zhp->zpool_hdl; nvlist_t *nvi = NULL; if (path) (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), path); else (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), zhp->zpool_name); (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); if (path) { if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, NULL)) == 0) return (zfs_error(hdl, EZFS_NODEVICE, msg)); /* * Don't allow error clearing for hot spares. Do allow * error clearing for l2cache devices. */ if (avail_spare) return (zfs_error(hdl, EZFS_ISSPARE, msg)); verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); } zpool_get_rewind_policy(rewindnvl, &policy); zc.zc_cookie = policy.zrp_request; if (zcmd_alloc_dst_nvlist(hdl, &zc, 8192) != 0) return (-1); if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, rewindnvl) != 0) return (-1); if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0 || ((policy.zrp_request & ZPOOL_TRY_REWIND) && errno != EPERM && errno != EACCES)) { if (policy.zrp_request & (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); zpool_rewind_exclaim(hdl, zc.zc_name, ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nvi); nvlist_free(nvi); } zcmd_free_nvlists(&zc); return (0); } zcmd_free_nvlists(&zc); return (zpool_standard_error(hdl, errno, msg)); } /* * Similar to zpool_clear(), but takes a GUID (used by fmd). */ int zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) { zfs_cmd_t zc = { 0 }; char msg[1024]; libzfs_handle_t *hdl = zhp->zpool_hdl; (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), (longlong_t) guid); (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); zc.zc_guid = guid; if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) return (0); return (zpool_standard_error(hdl, errno, msg)); } /* * Convert from a devid string to a path.
*/ static char * devid_to_path(char *devid_str) { ddi_devid_t devid; char *minor; char *path; devid_nmlist_t *list = NULL; int ret; if (devid_str_decode(devid_str, &devid, &minor) != 0) return (NULL); ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); devid_str_free(minor); devid_free(devid); if (ret != 0) return (NULL); if ((path = strdup(list[0].devname)) == NULL) return (NULL); devid_free_nmlist(list); return (path); } /* * Convert from a path to a devid string. */ static char * path_to_devid(const char *path) { int fd; ddi_devid_t devid; char *minor, *ret; if ((fd = open(path, O_RDONLY)) < 0) return (NULL); minor = NULL; ret = NULL; if (devid_get(fd, &devid) == 0) { if (devid_get_minor_name(fd, &minor) == 0) ret = devid_str_encode(devid, minor); if (minor != NULL) devid_str_free(minor); devid_free(devid); } (void) close(fd); return (ret); } /* * Issue the necessary ioctl() to update the stored path value for the vdev. We * ignore any failure here, since a common case is for an unprivileged user to * type 'zpool status', and we'll display the correct information anyway. */ static void set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) { zfs_cmd_t zc = { 0 }; (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); } /* * Given a vdev, return the name to display in iostat. If the vdev has a path, * we use that, stripping off any leading "/dev/"; if not, we use the type. * We also check if this is a whole disk, in which case we strip off the * trailing 's0' slice name. * * This routine is also responsible for identifying when disks have been * reconfigured in a new location. The kernel will have opened the device by * devid, but the path will still refer to the old location. To catch this, we * first do a path -> devid translation (which is fast for the common case). If * the devid matches, we're done. If not, we do a reverse devid -> path * translation and issue the appropriate ioctl() to update the path of the vdev. * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any * of these checks. */ /* * zfs-fuse FIXME: Handle this properly */ char * zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, boolean_t verbose) { char *path, *devid; uint64_t value; char buf[64]; vdev_stat_t *vs; uint_t vsc; if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0) { verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) == 0); (void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value); path = buf; } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) { /* * If the device is dead (faulted, offline, etc) then don't * bother opening it. Otherwise we may be forcing the user to * open a misbehaving device, which can have undesirable * effects. */ if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) != 0 || vs->vs_state >= VDEV_STATE_DEGRADED) && zhp != NULL && nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { /* * Determine if the current path is correct. */ char *newdevid = path_to_devid(path); if (newdevid == NULL || strcmp(devid, newdevid) != 0) { char *newpath; if ((newpath = devid_to_path(devid)) != NULL) { /* * Update the path appropriately. 
*/ set_path(zhp, nv, newpath); if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, newpath) == 0) verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0); free(newpath); } } if (newdevid) devid_str_free(newdevid); } if (strncmp(path, "/dev/", 5) == 0) path += 5; } else { verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); /* * If it's a raidz device, we need to stick in the parity level. */ if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, &value) == 0); (void) snprintf(buf, sizeof (buf), "%s%llu", path, (u_longlong_t)value); path = buf; } char str[64]; strcpy(str,path); /* * We identify each top-level vdev by using a <type-id> * naming convention. */ if (verbose) { uint64_t id; verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &id) == 0); (void) snprintf(buf, sizeof (buf), "%s-%llu", str, (u_longlong_t)id); path = buf; } } return (zfs_strdup(hdl, path)); } static int zbookmark_compare(const void *a, const void *b) { return (memcmp(a, b, sizeof (zbookmark_t))); } /* * Retrieve the persistent error log, uniquify the members, and return to the * caller. */ int zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) { zfs_cmd_t zc = { 0 }; uint64_t count; zbookmark_t *zb = NULL; int i; /* * Retrieve the raw error list from the kernel. If the number of errors * has increased, allocate more space and continue until we get the * entire list. */ verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, &count) == 0); if (count == 0) return (0); if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, count * sizeof (zbookmark_t))) == (uintptr_t)NULL) return (-1); zc.zc_nvlist_dst_size = count; (void) strcpy(zc.zc_name, zhp->zpool_name); for (;;) { if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, &zc) != 0) { free((void *)(uintptr_t)zc.zc_nvlist_dst); if (errno == ENOMEM) { count = zc.zc_nvlist_dst_size; if ((zc.zc_nvlist_dst = (uintptr_t) zfs_alloc(zhp->zpool_hdl, count * sizeof (zbookmark_t))) == (uintptr_t)NULL) return (-1); } else { return (-1); } } else { break; } } /* * Sort the resulting bookmarks. This is a little confusing due to the * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks * _not_ copied as part of the process. So we point the start of our * array appropriately and decrement the total number of elements. */ zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) + zc.zc_nvlist_dst_size; count -= zc.zc_nvlist_dst_size; void *nvlist_dst = (void *)(uintptr_t) zc.zc_nvlist_dst; qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare); verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); /* * Fill in the nverrlistp with nvlists of dataset and object numbers. */ for (i = 0; i < count; i++) { nvlist_t *nv; /* ignoring zb_blkid and zb_level for now */ if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && zb[i-1].zb_object == zb[i].zb_object) continue; if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) goto nomem; if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, zb[i].zb_objset) != 0) { nvlist_free(nv); goto nomem; } if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, zb[i].zb_object) != 0) { nvlist_free(nv); goto nomem; } if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { nvlist_free(nv); goto nomem; } nvlist_free(nv); } free(nvlist_dst); return (0); nomem: free(nvlist_dst); free((void *)(uintptr_t)zc.zc_nvlist_dst); return (no_memory(zhp->zpool_hdl)); } /* * Upgrade a ZFS pool to the latest on-disk version.
*/ int zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) { zfs_cmd_t zc = { 0 }; libzfs_handle_t *hdl = zhp->zpool_hdl; (void) strcpy(zc.zc_name, zhp->zpool_name); zc.zc_cookie = new_version; if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) return (zpool_standard_error_fmt(hdl, errno, dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), zhp->zpool_name)); return (0); } void zpool_set_history_str(const char *subcommand, int argc, char **argv, char *history_str) { int i; (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN); for (i = 1; i < argc; i++) { if (strlen(history_str) + 1 + strlen(argv[i]) > HIS_MAX_RECORD_LEN) break; (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN); (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN); } } /* * Stage command history for logging. */ int zpool_stage_history(libzfs_handle_t *hdl, const char *history_str) { if (history_str == NULL) return (EINVAL); if (strlen(history_str) > HIS_MAX_RECORD_LEN) return (EINVAL); if (hdl->libzfs_log_str != NULL) free(hdl->libzfs_log_str); if ((hdl->libzfs_log_str = strdup(history_str)) == NULL) return (no_memory(hdl)); return (0); } /* * Perform ioctl to get some command history of a pool. * * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the * logical offset of the history buffer to start reading from. * * Upon return, 'off' is the next logical offset to read from and * 'len' is the actual amount of bytes read into 'buf'. */ static int get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) { zfs_cmd_t zc = { 0 }; libzfs_handle_t *hdl = zhp->zpool_hdl; (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); zc.zc_history = (uint64_t)(uintptr_t)buf; zc.zc_history_len = *len; zc.zc_history_offset = *off; if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { switch (errno) { case EPERM: return (zfs_error_fmt(hdl, EZFS_PERM, dgettext(TEXT_DOMAIN, "cannot show history for pool '%s'"), zhp->zpool_name)); case ENOENT: return (zfs_error_fmt(hdl, EZFS_NOHISTORY, dgettext(TEXT_DOMAIN, "cannot get history for pool " "'%s'"), zhp->zpool_name)); case ENOTSUP: return (zfs_error_fmt(hdl, EZFS_BADVERSION, dgettext(TEXT_DOMAIN, "cannot get history for pool " "'%s', pool must be upgraded"), zhp->zpool_name)); default: return (zpool_standard_error_fmt(hdl, errno, dgettext(TEXT_DOMAIN, "cannot get history for '%s'"), zhp->zpool_name)); } } *len = zc.zc_history_len; *off = zc.zc_history_offset; return (0); } /* * Process the buffer of nvlists, unpacking and storing each nvlist record * into 'records'. 'leftover' is set to the number of bytes that weren't * processed as there wasn't a complete record. */ int zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, nvlist_t ***records, uint_t *numrecords) { uint64_t reclen; nvlist_t *nv; int i; while (bytes_read > sizeof (reclen)) { /* get length of packed record (stored as little endian) */ for (i = 0, reclen = 0; i < sizeof (reclen); i++) reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); if (bytes_read < sizeof (reclen) + reclen) break; /* unpack record */ if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) return (ENOMEM); bytes_read -= sizeof (reclen) + reclen; buf += sizeof (reclen) + reclen; /* add record to nvlist array */ (*numrecords)++; if (ISP2(*numrecords + 1)) { *records = realloc(*records, *numrecords * 2 * sizeof (nvlist_t *)); } (*records)[*numrecords - 1] = nv; } *leftover = bytes_read; return (0); } #define HIS_BUF_LEN (128*1024) /* * Retrieve the command history of a pool. 
*/ int zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) { char buf[HIS_BUF_LEN]; uint64_t off = 0; nvlist_t **records = NULL; uint_t numrecords = 0; int err, i; do { uint64_t bytes_read = sizeof (buf); uint64_t leftover; if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) break; /* if nothing else was read in, we're at EOF, just return */ if (!bytes_read) break; if ((err = zpool_history_unpack(buf, bytes_read, &leftover, &records, &numrecords)) != 0) break; off -= leftover; /* CONSTCOND */ } while (1); if (!err) { verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, records, numrecords) == 0); } for (i = 0; i < numrecords; i++) nvlist_free(records[i]); free(records); return (err); } void zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, char *pathname, size_t len) { zfs_cmd_t zc = { 0 }; boolean_t mounted = B_FALSE; char *mntpnt = NULL; char dsname[MAXNAMELEN]; if (dsobj == 0) { /* special case for the MOS */ (void) snprintf(pathname, len, "<metadata>:<0x%llx>", (u_longlong_t) obj); return; } /* get the dataset's name */ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); zc.zc_obj = dsobj; if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { /* just write out a path of two object numbers */ (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", (u_longlong_t) dsobj, (u_longlong_t) obj); return; } (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); /* find out if the dataset is mounted */ mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); /* get the corrupted object's path */ (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); zc.zc_obj = obj; if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, &zc) == 0) { if (mounted) { (void) snprintf(pathname, len, "%s%s", mntpnt, zc.zc_value); } else { (void) snprintf(pathname, len, "%s:%s", dsname, zc.zc_value); } } else { (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, (u_longlong_t) obj); } free(mntpnt); } /* * Read the EFI label from the config, if a label does not exist then * pass back the error to the caller. If the caller has passed a non-NULL * diskaddr argument then we set it to the starting address of the EFI * partition. */ /* ZFS-FUSE: not implemented */ #if 0 static int read_efi_label(nvlist_t *config, diskaddr_t *sb) { char *path; int fd; char diskname[MAXPATHLEN]; int err = -1; if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) return (err); (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, strrchr(path, '/')); if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { struct dk_gpt *vtoc; if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { if (sb != NULL) *sb = vtoc->efi_parts[0].p_start; efi_free(vtoc); } (void) close(fd); } return (err); } /* * determine where a partition starts on a disk in the current * configuration */ static diskaddr_t find_start_block(nvlist_t *config) { nvlist_t **child; uint_t c, children; diskaddr_t sb = MAXOFFSET_T; uint64_t wholedisk; if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk) != 0 || !wholedisk) { return (MAXOFFSET_T); } if (read_efi_label(config, &sb) < 0) sb = MAXOFFSET_T; return (sb); } for (c = 0; c < children; c++) { sb = find_start_block(child[c]); if (sb != MAXOFFSET_T) { return (sb); } } return (MAXOFFSET_T); } #endif /* * Label an individual disk. 
The name provided is the short name, * stripped of any leading /dev path. */ /* ZFS-FUSE: not implemented */ #if 0 int zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name) { char path[MAXPATHLEN]; struct dk_gpt *vtoc; int fd; size_t resv = EFI_MIN_RESV_SIZE; uint64_t slice_size; diskaddr_t start_block; char errbuf[1024]; /* prepare an error message just in case */ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); if (zhp) { nvlist_t *nvroot; if (pool_is_bootable(zhp)) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "EFI labeled devices are not supported on root " "pools.")); return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); } verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); if (zhp->zpool_start_block == 0) start_block = find_start_block(nvroot); else start_block = zhp->zpool_start_block; zhp->zpool_start_block = start_block; } else { /* new pool */ start_block = NEW_START_BLOCK; } (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, BACKUP_SLICE); if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { /* * This shouldn't happen. We've long since verified that this * is a valid device. */ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "unable to open device")); return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); } if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { /* * The only way this can fail is if we run out of memory, or we * were unable to read the disk's capacity */ if (errno == ENOMEM) (void) no_memory(hdl); (void) close(fd); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "unable to read disk capacity"), name); return (zfs_error(hdl, EZFS_NOCAP, errbuf)); } slice_size = vtoc->efi_last_u_lba + 1; slice_size -= EFI_MIN_RESV_SIZE; if (start_block == MAXOFFSET_T) start_block = NEW_START_BLOCK; slice_size -= start_block; vtoc->efi_parts[0].p_start = start_block; vtoc->efi_parts[0].p_size = slice_size; /* * Why we use V_USR: V_BACKUP confuses users, and is considered * disposable by some EFI utilities (since EFI doesn't have a backup * slice). V_UNASSIGNED is supposed to be used only for zero size * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, * etc. were all pretty specific. V_USR is as close to reality as we * can get, in the absence of V_OTHER. */ vtoc->efi_parts[0].p_tag = V_USR; (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); vtoc->efi_parts[8].p_start = slice_size + start_block; vtoc->efi_parts[8].p_size = resv; vtoc->efi_parts[8].p_tag = V_RESERVED; if (efi_write(fd, vtoc) != 0) { /* * Some block drivers (like pcata) may not support EFI * GPT labels. Print out a helpful error message dir- * ecting the user to manually label the disk and give * a specific slice. 
*/ (void) close(fd); efi_free(vtoc); zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using fdisk(1M) and then provide a specific slice")); return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); } (void) close(fd); efi_free(vtoc); return (0); } static boolean_t supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) { char *type; nvlist_t **child; uint_t children, c; verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 || strcmp(type, VDEV_TYPE_FILE) == 0 || strcmp(type, VDEV_TYPE_LOG) == 0 || strcmp(type, VDEV_TYPE_HOLE) == 0 || strcmp(type, VDEV_TYPE_MISSING) == 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "vdev type '%s' is not supported"), type); (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); return (B_FALSE); } if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, &child, &children) == 0) { for (c = 0; c < children; c++) { if (!supported_dump_vdev_type(hdl, child[c], errbuf)) return (B_FALSE); } } return (B_TRUE); } /* * check if this zvol is allowable for use as a dump device; zero if * it is, > 0 if it isn't, < 0 if it isn't a zvol */ int zvol_check_dump_config(char *arg) { zpool_handle_t *zhp = NULL; nvlist_t *config, *nvroot; char *p, *volname; nvlist_t **top; uint_t toplevels; libzfs_handle_t *hdl; char errbuf[1024]; char poolname[ZPOOL_MAXNAMELEN]; int pathlen = strlen(ZVOL_FULL_DEV_DIR); int ret = 1; if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { return (-1); } (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, "dump is not supported on device '%s'"), arg); if ((hdl = libzfs_init()) == NULL) return (1); libzfs_print_on_error(hdl, B_TRUE); volname = arg + pathlen; /* check the configuration of the pool */ if ((p = strchr(volname, '/')) == NULL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "malformed dataset name")); (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); return (1); } else if (p - volname >= ZFS_MAXNAMELEN) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "dataset name is too long")); (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); return (1); } else { (void) strncpy(poolname, volname, p - volname); poolname[p - volname] = '\0'; } if ((zhp = zpool_open(hdl, poolname)) == NULL) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "could not open pool '%s'"), poolname); (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); goto out; } config = zpool_get_config(zhp, NULL); if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "could not obtain vdev configuration for '%s'"), poolname); (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); goto out; } verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &top, &toplevels) == 0); if (toplevels != 1) { zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' has multiple top level vdevs"), poolname); (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf); goto out; } if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { goto out; } ret = 0; out: if (zhp) zpool_close(zhp); libzfs_fini(hdl); return (ret); } #endif
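/*
 * Hypothetical caller sketch (not part of libzfs): a minimal example of
 * the lookup forms zpool_find_vdev() accepts, per the parsing above --
 * a bare device name that gets "/dev/dsk/" prepended, an all-digit
 * string treated as a vdev GUID, or an interior vdev name such as
 * "mirror" or "raidz". The pool handle, device name, and GUID value
 * here are assumptions for illustration only.
 */
#include <libzfs.h>
#include <stdio.h>

static void
lookup_examples(zpool_handle_t *zhp)
{
	boolean_t spare, l2cache, log;
	nvlist_t *nv;

	/* bare device name: searched as /dev/dsk/c0t0d0s0 */
	nv = zpool_find_vdev(zhp, "c0t0d0s0", &spare, &l2cache, &log);

	/* all-digit string: treated as a vdev GUID (value is made up) */
	if (nv == NULL)
		nv = zpool_find_vdev(zhp, "9271148051836340554",
		    &spare, &l2cache, &log);

	if (nv != NULL)
		(void) printf("found vdev (spare=%d l2cache=%d log=%d)\n",
		    spare, l2cache, log);
}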
angular.module('appTesting').service("LoginLocalStorage", function () { "use strict"; var STORE_NAME = "login"; var setUser = function setUser(user) { localStorage.setItem(STORE_NAME, JSON.stringify(user)); }; var getUser = function getUser() { var storedTasks = localStorage.getItem(STORE_NAME); if (storedTasks) { return JSON.parse(storedTasks); } return {}; }; return { setUser: setUser, getUser: getUser }; });
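// Hypothetical usage sketch (not part of the service above): a controller
// persisting and restoring the login via LoginLocalStorage. The controller
// name and the user object's shape are assumptions for illustration.
angular.module('appTesting').controller('LoginDemoCtrl', ['LoginLocalStorage', function (LoginLocalStorage) {
    "use strict";
    LoginLocalStorage.setUser({ username: 'ana', token: 'abc123' });
    var user = LoginLocalStorage.getUser(); // returns {} when nothing is stored
    console.log(user.username); // 'ana'
}]);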
--- layout: post title: Bhaas date: '2007-06-17 21:32:00' tags: ["poetry"] --- <p><a href="http://bp2.blogger.com/_cWdd7TsTIWo/RnWon-oIleI/AAAAAAAAAAk/PCXo2q26GsQ/s1600-h/bhas.JPG"><img style="display:block; margin:0px auto 10px; text-align:center;cursor:pointer; cursor:hand;" src="http://bp2.blogger.com/_cWdd7TsTIWo/RnWon-oIleI/AAAAAAAAAAk/PCXo2q26GsQ/s320/bhas.JPG" border="0" alt="" id="BLOGGER_PHOTO_ID_5077149559709799906"/></a></p>
b'16 + (-4 + 7 - 12)\n'
b'What is (11 - -25) + 22 - 24?\n'
b'What is 12 + -24 + (-11 - -2)?\n'
b'What is the value of (9 - 4 - (11 - 2) - 2) + -6?\n'
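# Worked answers for the four bytes-encoded arithmetic prompts above;
# each assert passes, so they double as a quick check:
assert 16 + (-4 + 7 - 12) == 7
assert (11 - -25) + 22 - 24 == 34
assert 12 + -24 + (-11 - -2) == -21
assert (9 - 4 - (11 - 2) - 2) + -6 == -12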
'use babel'; import MapQueries from '../lib/map-queries'; // Use the command `window:run-package-specs` (cmd-alt-ctrl-p) to run specs. // // To run a specific `it` or `describe` block add an `f` to the front (e.g. `fit` // or `fdescribe`). Remove the `f` to unfocus the block. describe('MapQueries', () => { let workspaceElement, activationPromise; beforeEach(() => { workspaceElement = atom.views.getView(atom.workspace); activationPromise = atom.packages.activatePackage('map-queries'); }); describe('when the map-queries:toggle event is triggered', () => { it('hides and shows the modal panel', () => { // Before the activation event the view is not on the DOM, and no panel // has been created expect(workspaceElement.querySelector('.map-queries')).not.toExist(); // This is an activation event, triggering it will cause the package to be // activated. atom.commands.dispatch(workspaceElement, 'map-queries:toggle'); waitsForPromise(() => { return activationPromise; }); runs(() => { expect(workspaceElement.querySelector('.map-queries')).toExist(); let mapQueriesElement = workspaceElement.querySelector('.map-queries'); expect(mapQueriesElement).toExist(); let mapQueriesPanel = atom.workspace.panelForItem(mapQueriesElement); expect(mapQueriesPanel.isVisible()).toBe(true); atom.commands.dispatch(workspaceElement, 'map-queries:toggle'); expect(mapQueriesPanel.isVisible()).toBe(false); }); }); it('hides and shows the view', () => { // This test shows you an integration test testing at the view level. // Attaching the workspaceElement to the DOM is required to allow the // `toBeVisible()` matchers to work. Anything testing visibility or focus // requires that the workspaceElement is on the DOM. Tests that attach the // workspaceElement to the DOM are generally slower than those off DOM. jasmine.attachToDOM(workspaceElement); expect(workspaceElement.querySelector('.map-queries')).not.toExist(); // This is an activation event, triggering it causes the package to be // activated. atom.commands.dispatch(workspaceElement, 'map-queries:toggle'); waitsForPromise(() => { return activationPromise; }); runs(() => { // Now we can test for view visibility let mapQueriesElement = workspaceElement.querySelector('.map-queries'); expect(mapQueriesElement).toBeVisible(); atom.commands.dispatch(workspaceElement, 'map-queries:toggle'); expect(mapQueriesElement).not.toBeVisible(); }); }); }); });
b'What is (-1 - -8) + 2 + -9?\n'
b'What is -6 + 2 + 10 + -2 + -30?\n'
#include "HotNeedleLightControl.h" HotNeedleLightControlClass::HotNeedleLightControlClass(uint8_t background[NEOPIXEL_COUNT][COLOR_BYTES], uint8_t hotNeedleColor[COLOR_BYTES], float highlightMultiplier, bool useHighlight, uint16_t fadeTime, uint8_t framePeriod, Adafruit_NeoPixel *strip) : LightControlClass(framePeriod, strip) { memcpy(this->backgroundColors, background, COLOR_BYTES*NEOPIXEL_COUNT); memcpy(this->hotNeedleColor, hotNeedleColor, COLOR_BYTES); fadeFrames = fadeTime / framePeriod; this->useHighlight = useHighlight; this->highlightMultiplier = highlightMultiplier; this->maximumLedPosition = 0; this->minimumLedPosition = NEOPIXEL_COUNT; } // Rendering code void HotNeedleLightControlClass::renderFrame(uint16_t pos, NEEDLE_DIRECTION dir) { // Increment existing counters decrementCounters(ledCounters); uint16_t needlePosition = pixelFromInputPosition(pos); // Set current position hot pixel counter to max ledCounters[needlePosition] = fadeFrames; draw(needlePosition); } void HotNeedleLightControlClass::draw(uint16_t needlePosition) { // Calculate display values for each pixel for (uint16_t p = 0; p < NEOPIXEL_COUNT; p++) { float backgroundRatio = (float)(fadeFrames - ledCounters[p]) / fadeFrames; float foregroundRatio = 1.0 - backgroundRatio; for (uint8_t c = 0; c < COLOR_BYTES; c++) { if (useHighlight) { // Foreground color is background color * highlight multiplier // Make sure we don't wrap past 255 int bg = backgroundColors[p][c] * highlightMultiplier; if (bg > 255) { bg = 255; } ledCurrentColors[p][c] = gammaCorrect((foregroundRatio * bg) + (backgroundRatio * backgroundColors[p][c])); } else { ledCurrentColors[p][c] = gammaCorrect((foregroundRatio * hotNeedleColor[c]) + (backgroundRatio * backgroundColors[p][c])); } } strip->setPixelColor(p, ledCurrentColors[p][RED], ledCurrentColors[p][GREEN], ledCurrentColors[p][BLUE]); } if(useMaximum){ updateMaximum(needlePosition); drawMaximum(); } if(useMinimum){ updateMinimum(needlePosition); drawMinimum(); } strip->show(); }
module V1 class EventUserSchedulesController < ApplicationController before_action :set_event_session, only: [:create] # POST /event_user_schedules def create @event_user_schedule = current_user.add_session_to_my_schedule(@event_session) if @event_user_schedule.save render json: @event_user_schedule, serializer: EventUserScheduleShortSerializer, root: "event_user_schedule", status: :created else render json: @event_user_schedule.errors, status: :unprocessable_entity end end # DELETE /event_user_schedules/:id def destroy @event_user_schedule = EventUserSchedule.find(params[:id]) if @event_user_schedule.event_user.user == current_user @event_user_schedule.destroy head :no_content else head :forbidden end end private # Never trust parameters from the scary internet, only allow the white list through. def event_user_schedule_params params.require(:event_user_schedule).permit(:event_session_id) end def set_event_session @event_session = EventSession.find(event_user_schedule_params[:event_session_id]) end end end
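# Hypothetical request/response sketch (not part of the controller above);
# the /v1 prefix and the IDs are assumptions based on the module namespace.
#
#   POST /v1/event_user_schedules
#   { "event_user_schedule": { "event_session_id": 42 } }
#   => 201 Created with the serialized schedule, or 422 on validation errors
#
#   DELETE /v1/event_user_schedules/42
#   => 204 No Content when the schedule belongs to current_user,
#      403 Forbidden otherwise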
b'Evaluate 4 - (-3 + -1 + (16 - 15) + 8).\n'
import json import os from flask import request, g, render_template, make_response, jsonify, Response from helpers.raw_endpoint import get_id, store_json_to_file from helpers.groups import get_groups from json_controller import JSONController from main import app from pymongo import MongoClient, errors HERE = os.path.dirname(os.path.abspath(__file__)) # setup database connection def connect_client(): """Connects to Mongo client""" try: return MongoClient(app.config['DB_HOST'], int(app.config['DB_PORT'])) except errors.ConnectionFailure as e: raise e def get_db(): """Connects to Mongo database""" if not hasattr(g, 'mongo_client'): g.mongo_client = connect_client() g.mongo_db = getattr(g.mongo_client, app.config['DB_NAME']) g.groups_collection = g.mongo_db[os.environ.get('DB_GROUPS_COLLECTION')] return g.mongo_db @app.teardown_appcontext def close_db(error): """Closes connection with Mongo client""" if hasattr(g, 'mongo_client'): g.mongo_client.close() # Begin view routes @app.route('/') @app.route('/index/') def index(): """Landing page for SciNet""" return render_template("index.html") @app.route('/faq/') def faq(): """FAQ page for SciNet""" return render_template("faq.html") @app.route('/leaderboard/') def leaderboard(): """Leaderboard page for SciNet""" get_db() groups = get_groups(g.groups_collection) return render_template("leaderboard.html", groups=groups) @app.route('/ping', methods=['POST']) def ping_endpoint(): """API endpoint that determines whether a potential article hash exists in the db :return: status code 204 -- hash not present, continue submission :return: status code 201 -- hash already exists, drop submission """ db = get_db() target_hash = request.form.get('hash') if db.raw.find({'hash': target_hash}).count(): return Response(status=201) else: return Response(status=204) @app.route('/articles') def ArticleEndpoint(): """Eventual landing page for searching/retrieving articles""" if request.method == 'GET': return render_template("articles.html") @app.route('/raw', methods=['POST']) def raw_endpoint(): """API endpoint for submitting raw article data :return: status code 405 - invalid JSON or invalid request type :return: status code 400 - unsupported content-type or invalid publisher :return: status code 201 - successful submission """ # Ensure post's content-type is supported if request.headers['content-type'] == 'application/json': # Ensure data is valid JSON try: user_submission = json.loads(request.data) except ValueError: return Response(status=405) # generate UID for new entry uid = get_id() # store incoming JSON in raw storage file_path = os.path.join( HERE, 'raw_payloads', str(uid) ) store_json_to_file(user_submission, file_path) # hand submission to controller and return Response db = get_db() controller_response = JSONController(user_submission, db=db, _id=uid).submit() return controller_response # User submitted an unsupported content-type else: return Response(status=400) #@TODO: Implicit or Explicit group additions? Issue #51 comments on the issues page #@TODO: Add form validation @app.route('/requestnewgroup/', methods=['POST']) def request_new_group(): # Grab submission form data and prepare email message data = request.json msg = "Someone has requested that you add {group_name} to the leaderboard \ groups.
The group's website is {group_website} and the submitter can \ be reached at {submitter_email}.".format( group_name=data['new_group_name'], group_website=data['new_group_website'], submitter_email=data['submitter_email']) return Response(status=200) ''' try: email( subject="SciNet: A new group has been requested", fro="[email protected]", to='[email protected]', msg=msg) return Response(status=200) except: return Response(status=500) ''' # Error handlers @app.errorhandler(404) def not_found(error): return make_response(jsonify( { 'error': 'Page Not Found' } ), 404) @app.errorhandler(405) def method_not_allowed(error): return make_response(jsonify( { 'error': 'Method Not Allowed' } ), 405)
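# Hypothetical client sketch (not part of the app above): exercising the
# /ping and /raw endpoints, assuming the server runs on localhost:5000.
# Uses the third-party `requests` package; the payload shape is illustrative.
import requests

BASE = 'http://localhost:5000'

# 204 -> hash unseen (safe to submit), 201 -> hash already present
r = requests.post(BASE + '/ping', data={'hash': 'abc123'})
if r.status_code == 204:
    # /raw requires a JSON content-type; requests.post(json=...) sets it
    r = requests.post(BASE + '/raw', json={'hash': 'abc123'})
    print(r.status_code)  # 201 on success, 400/405 on bad submissions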
import { createSelector } from 'reselect'; import * as movie from './../actions/movie'; import { Movie } from './../models'; import * as _ from 'lodash'; import { AsyncOperation, AsyncStatus, makeAsyncOp } from "./../utils"; export interface State { entities: { [movieId: string]: Movie }; mapMovieToCinema: { [cinemaId: string]: { releasedIds: string[] otherIds: string[], loadingOp: AsyncOperation, } }; selectedId: string; } export const initialState: State = { entities: {}, mapMovieToCinema: {}, selectedId: null, }; export function reducer(state: State = initialState, actionRaw: movie.Actions): State { switch (actionRaw.type) { case movie.ActionTypes.LOAD: { let action = <movie.LoadAction>actionRaw; let cinemaId = action.payload.cinemaId; return { ...state, mapMovieToCinema: { ...state.mapMovieToCinema, [cinemaId]: { ...state.mapMovieToCinema[cinemaId], releasedIds: [], otherIds: [], loadingOp: makeAsyncOp(AsyncStatus.Pending), }, }, }; } case movie.ActionTypes.LOAD_SUCCESS: { let action = <movie.LoadSuccessAction>actionRaw; let entities = _.flatten([action.payload.released, action.payload.other]) .reduce((entities, movie) => { return { ...entities, [movie.id]: movie, }; }, state.entities); let map = { releasedIds: action.payload.released.map(m => m.id), otherIds: action.payload.other.map(m => m.id), loadingOp: makeAsyncOp(AsyncStatus.Success), }; return { ...state, entities: entities, mapMovieToCinema: { ...state.mapMovieToCinema, [action.payload.cinemaId]: map }, }; } case movie.ActionTypes.LOAD_FAIL: { let action = <movie.LoadFailAction>actionRaw; let cinemaId = action.payload.cinemaId; return { ...state, mapMovieToCinema: { ...state.mapMovieToCinema, [cinemaId]: { ...state.mapMovieToCinema[cinemaId], loadingOp: makeAsyncOp(AsyncStatus.Fail, action.payload.errorMessage), }, }, }; } case movie.ActionTypes.SELECT: { var action = <movie.SelectAction>actionRaw; return { ...state, selectedId: action.payload, }; } default: return state; } } export const getEntities = (state: State) => state.entities; export const getMapToCinema = (state: State) => state.mapMovieToCinema; export const getSelectedId = (state: State) => state.selectedId; export const getSelected = createSelector(getEntities, getSelectedId, (entities, id) => { return entities[id]; });
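// Hypothetical walk-through (not part of the store above): driving the
// reducer through LOAD then LOAD_SUCCESS for one cinema. It assumes the
// action classes take their payload as a constructor argument (the usual
// ngrx convention) and that this snippet sits in a scratch file next to
// the reducer, so the relative import paths are illustrative.
import * as movie from './../actions/movie';
import { reducer, initialState } from './movie';

let state = reducer(initialState, new movie.LoadAction({ cinemaId: 'c1' }));
// state.mapMovieToCinema['c1'].loadingOp.status is now AsyncStatus.Pending

state = reducer(state, new movie.LoadSuccessAction({
    cinemaId: 'c1',
    released: [{ id: 'm1' } as any],
    other: [],
}));
// state.entities['m1'] is populated and
// state.mapMovieToCinema['c1'].releasedIds equals ['m1']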
<?php namespace PragmaRX\Sdk\Services\Accounts\Exceptions; use PragmaRX\Sdk\Core\HttpResponseException; class InvalidPassword extends HttpResponseException { protected $message = 'paragraphs.invalid-password'; }
match x: | it?(): true
'use strict'; const expect = require('expect.js'); const http = require('http'); const express = require('express'); const linkCheck = require('../'); describe('link-check', function () { this.timeout(2500);//increase timeout to enable 429 retry tests let baseUrl; let laterCustomRetryCounter; before(function (done) { const app = express(); app.head('/nohead', function (req, res) { res.sendStatus(405); // method not allowed }); app.get('/nohead', function (req, res) { res.sendStatus(200); }); app.get('/foo/redirect', function (req, res) { res.redirect('/foo/bar'); }); app.get('/foo/bar', function (req, res) { res.json({foo:'bar'}); }); app.get('/loop', function (req, res) { res.redirect('/loop'); }); app.get('/hang', function (req, res) { // no reply }); app.get('/notfound', function (req, res) { res.sendStatus(404); }); app.get('/basic-auth', function (req, res) { if (req.headers["authorization"] === "Basic Zm9vOmJhcg==") { return res.sendStatus(200); } res.sendStatus(401); }); // prevent first header try to be a hit app.head('/later-custom-retry-count', function (req, res) { res.sendStatus(405); // method not allowed }); app.get('/later-custom-retry-count', function (req, res) { laterCustomRetryCounter++; if(laterCustomRetryCounter === parseInt(req.query.successNumber)) { res.sendStatus(200); }else{ res.setHeader('retry-after', 1); res.sendStatus(429); } }); // prevent first header try to be a hit app.head('/later-standard-header', function (req, res) { res.sendStatus(405); // method not allowed }); var stdRetried = false; var stdFirstTry = 0; app.get('/later', function (req, res) { var isRetryDelayExpired = stdFirstTry + 1000 < Date.now(); if(!stdRetried || !isRetryDelayExpired){ stdFirstTry = Date.now(); stdRetried = true; res.setHeader('retry-after', 1); res.sendStatus(429); }else{ res.sendStatus(200); } }); // prevent first header try to be a hit app.head('/later-no-header', function (req, res) { res.sendStatus(405); // method not allowed }); var stdNoHeadRetried = false; var stdNoHeadFirstTry = 0; app.get('/later-no-header', function (req, res) { var minTime = stdNoHeadFirstTry + 1000; var maxTime = minTime + 100; var now = Date.now(); var isRetryDelayExpired = minTime < now && now < maxTime; if(!stdNoHeadRetried || !isRetryDelayExpired){ stdNoHeadFirstTry = Date.now(); stdNoHeadRetried = true; res.sendStatus(429); }else{ res.sendStatus(200); } }); // prevent first header try to be a hit app.head('/later-non-standard-header', function (req, res) { res.sendStatus(405); // method not allowed }); var nonStdRetried = false; var nonStdFirstTry = 0; app.get('/later-non-standard-header', function (req, res) { var isRetryDelayExpired = nonStdFirstTry + 1000 < Date.now(); if(!nonStdRetried || !isRetryDelayExpired){ nonStdFirstTry = Date.now(); nonStdRetried = true; res.setHeader('retry-after', '1s'); res.sendStatus(429); }else { res.sendStatus(200); } }); app.get(encodeURI('/url_with_unicode–'), function (req, res) { res.sendStatus(200); }); app.get('/url_with_special_chars\\(\\)\\+', function (req, res) { res.sendStatus(200); }); const server = http.createServer(app); server.listen(0 /* random open port */, 'localhost', function serverListen(err) { if (err) { done(err); return; } baseUrl = 'http://' + server.address().address + ':' + server.address().port; done(); }); }); it('should find that a valid link is alive', function (done) { linkCheck(baseUrl + '/foo/bar', function (err, result) { expect(err).to.be(null); expect(result.link).to.be(baseUrl + '/foo/bar'); 
expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); expect(result.err).to.be(null); done(); }); }); it('should find that a valid external link with basic authentication is alive', function (done) { linkCheck(baseUrl + '/basic-auth', { headers: { 'Authorization': 'Basic Zm9vOmJhcg==' }, }, function (err, result) { expect(err).to.be(null); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); expect(result.err).to.be(null); done(); }); }); it('should find that a valid relative link is alive', function (done) { linkCheck('/foo/bar', { baseUrl: baseUrl }, function (err, result) { expect(err).to.be(null); expect(result.link).to.be('/foo/bar'); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); expect(result.err).to.be(null); done(); }); }); it('should find that an invalid link is dead', function (done) { linkCheck(baseUrl + '/foo/dead', function (err, result) { expect(err).to.be(null); expect(result.link).to.be(baseUrl + '/foo/dead'); expect(result.status).to.be('dead'); expect(result.statusCode).to.be(404); expect(result.err).to.be(null); done(); }); }); it('should find that an invalid relative link is dead', function (done) { linkCheck('/foo/dead', { baseUrl: baseUrl }, function (err, result) { expect(err).to.be(null); expect(result.link).to.be('/foo/dead'); expect(result.status).to.be('dead'); expect(result.statusCode).to.be(404); expect(result.err).to.be(null); done(); }); }); it('should report no DNS entry as a dead link (http)', function (done) { linkCheck('http://example.example.example.com/', function (err, result) { expect(err).to.be(null); expect(result.link).to.be('http://example.example.example.com/'); expect(result.status).to.be('dead'); expect(result.statusCode).to.be(0); expect(result.err.code).to.be('ENOTFOUND'); done(); }); }); it('should report no DNS entry as a dead link (https)', function (done) { const badLink = 'https://githuuuub.com/tcort/link-check'; linkCheck(badLink, function (err, result) { expect(err).to.be(null); expect(result.link).to.be(badLink); expect(result.status).to.be('dead'); expect(result.statusCode).to.be(0); expect(result.err.code).to.contain('ENOTFOUND'); done(); }); }); it('should timeout if there is no response', function (done) { linkCheck(baseUrl + '/hang', { timeout: '100ms' }, function (err, result) { expect(err).to.be(null); expect(result.link).to.be(baseUrl + '/hang'); expect(result.status).to.be('dead'); expect(result.statusCode).to.be(0); expect(result.err.code).to.be('ECONNRESET'); done(); }); }); it('should try GET if HEAD fails', function (done) { linkCheck(baseUrl + '/nohead', function (err, result) { expect(err).to.be(null); expect(result.link).to.be(baseUrl + '/nohead'); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); expect(result.err).to.be(null); done(); }); }); it('should handle redirects', function (done) { linkCheck(baseUrl + '/foo/redirect', function (err, result) { expect(err).to.be(null); expect(result.link).to.be(baseUrl + '/foo/redirect'); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); expect(result.err).to.be(null); done(); }); }); it('should handle valid mailto', function (done) { linkCheck('mailto:[email protected]', function (err, result) { expect(err).to.be(null); expect(result.link).to.be('mailto:[email protected]'); expect(result.status).to.be('alive'); done(); }); }); it('should handle valid mailto with encoded characters in address', function (done) { linkCheck('mailto:foo%[email protected]', 
function (err, result) { expect(err).to.be(null); expect(result.link).to.be('mailto:foo%[email protected]'); expect(result.status).to.be('alive'); done(); }); }); it('should handle valid mailto containing hfields', function (done) { linkCheck('mailto:[email protected]?subject=caf%C3%A9', function (err, result) { expect(err).to.be(null); expect(result.link).to.be('mailto:[email protected]?subject=caf%C3%A9'); expect(result.status).to.be('alive'); done(); }); }); it('should handle invalid mailto', function (done) { linkCheck('mailto:foo@@bar@@baz', function (err, result) { expect(err).to.be(null); expect(result.link).to.be('mailto:foo@@bar@@baz'); expect(result.status).to.be('dead'); done(); }); }); it('should handle file protocol', function(done) { linkCheck('fixtures/file.md', { baseUrl: 'file://' + __dirname }, function(err, result) { expect(err).to.be(null); expect(result.err).to.be(null); expect(result.status).to.be('alive'); done() }); }); it('should handle file protocol with fragment', function(done) { linkCheck('fixtures/file.md#section-1', { baseUrl: 'file://' + __dirname }, function(err, result) { expect(err).to.be(null); expect(result.err).to.be(null); expect(result.status).to.be('alive'); done() }); }); it('should handle file protocol with query', function(done) { linkCheck('fixtures/file.md?foo=bar', { baseUrl: 'file://' + __dirname }, function(err, result) { expect(err).to.be(null); expect(result.err).to.be(null); expect(result.status).to.be('alive'); done() }); }); it('should handle file path containing spaces', function(done) { linkCheck('fixtures/s p a c e/A.md', { baseUrl: 'file://' + __dirname }, function(err, result) { expect(err).to.be(null); expect(result.err).to.be(null); expect(result.status).to.be('alive'); done() }); }); it('should handle baseUrl containing spaces', function(done) { linkCheck('A.md', { baseUrl: 'file://' + __dirname + '/fixtures/s p a c e'}, function(err, result) { expect(err).to.be(null); expect(result.err).to.be(null); expect(result.status).to.be('alive'); done() }); }); it('should handle file protocol and invalid files', function(done) { linkCheck('fixtures/missing.md', { baseUrl: 'file://' + __dirname }, function(err, result) { expect(err).to.be(null); expect(result.err.code).to.be('ENOENT'); expect(result.status).to.be('dead'); done() }); }); it('should ignore file protocol on absolute links', function(done) { linkCheck(baseUrl + '/foo/bar', { baseUrl: 'file://' }, function(err, result) { expect(err).to.be(null); expect(result.link).to.be(baseUrl + '/foo/bar'); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); expect(result.err).to.be(null); done() }); }); it('should ignore file protocol on fragment links', function(done) { linkCheck('#foobar', { baseUrl: 'file://' }, function(err, result) { expect(err).to.be(null); expect(result.link).to.be('#foobar'); done() }); }); it('should callback with an error on unsupported protocol', function (done) { linkCheck('gopher://gopher/0/v2/vstat', function (err, result) { expect(result).to.be(null); expect(err).to.be.an(Error); done(); }); }); it('should handle redirect loops', function (done) { linkCheck(baseUrl + '/loop', function (err, result) { expect(err).to.be(null); expect(result.link).to.be(baseUrl + '/loop'); expect(result.status).to.be('dead'); expect(result.statusCode).to.be(0); expect(result.err.message).to.contain('Max redirects reached'); done(); }); }); it('should honour response codes in opts.aliveStatusCodes[]', function (done) { linkCheck(baseUrl + '/notfound', { 
aliveStatusCodes: [ 404, 200 ] }, function (err, result) { expect(err).to.be(null); expect(result.link).to.be(baseUrl + '/notfound'); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(404); done(); }); }); it('should honour regexps in opts.aliveStatusCodes[]', function (done) { linkCheck(baseUrl + '/notfound', { aliveStatusCodes: [ 200, /^[45][0-9]{2}$/ ] }, function (err, result) { expect(err).to.be(null); expect(result.link).to.be(baseUrl + '/notfound'); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(404); done(); }); }); it('should honour opts.aliveStatusCodes[]', function (done) { linkCheck(baseUrl + '/notfound', { aliveStatusCodes: [ 200 ] }, function (err, result) { expect(err).to.be(null); expect(result.link).to.be(baseUrl + '/notfound'); expect(result.status).to.be('dead'); expect(result.statusCode).to.be(404); done(); }); }); it('should retry after the provided delay on HTTP 429 with standard header', function (done) { linkCheck(baseUrl + '/later', { retryOn429: true }, function (err, result) { expect(err).to.be(null); expect(result.err).to.be(null); expect(result.link).to.be(baseUrl + '/later'); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); done(); }); }); it('should retry after the provided delay on HTTP 429 with non standard header, and return a warning', function (done) { linkCheck(baseUrl + '/later-non-standard-header', { retryOn429: true }, function (err, result) { expect(err).to.be(null); expect(result.err).not.to.be(null) expect(result.err).to.contain("Server returned a non standard \'retry-after\' header."); expect(result.link).to.be(baseUrl + '/later-non-standard-header'); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); done(); }); }); it('should retry after 1s delay on HTTP 429 without header', function (done) { linkCheck(baseUrl + '/later-no-header', { retryOn429: true, fallbackRetryDelay: '1s' }, function (err, result) { expect(err).to.be(null); expect(result.err).to.be(null); expect(result.link).to.be(baseUrl + '/later-no-header'); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); done(); }); }); // 2 is default retry so test with custom 3 it('should retry 3 times for 429 status codes', function(done) { laterCustomRetryCounter = 0; linkCheck(baseUrl + '/later-custom-retry-count?successNumber=3', { retryOn429: true, retryCount: 3 }, function(err, result) { expect(err).to.be(null); expect(result.err).to.be(null); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); done(); }); }); // See issue #23 it('should handle non URL encoded unicode chars in URLs', function(done) { //last char is EN DASH linkCheck(baseUrl + '/url_with_unicode–', function(err, result) { expect(err).to.be(null); expect(result.err).to.be(null); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); done(); }); }); // See issues #34 and #40 it('should not URL encode already encoded characters', function(done) { linkCheck(baseUrl + '/url_with_special_chars%28%29%2B', function(err, result) { expect(err).to.be(null); expect(result.err).to.be(null); expect(result.status).to.be('alive'); expect(result.statusCode).to.be(200); done(); }); }); });
---
layout: post
title: "The Four Demon Kings of Programming in the IT Industry"
date: 2017-11-10 16:09:16 +0800
tag: [blog]
---

Blog addresses of the IT industry's four "demon kings" of programming.

Hu Zheng (胡正) - [Pratyekabuddha Hu Zheng · Venerable Arhat · Bodhisattva of the Merit Treasury](http://www.huzheng.org/myapps.php)
Tian Chun (田春) - [Chun Tian (binghe)](http://tianchunbinghe.blog.163.com/)
Xah Lee (李杀) - [Xah Lee Web](http://xahlee.org/)
Wang Yin (王垠) - [当然我在扯淡 ("Of Course I'm Bullshitting")](http://www.yinwang.org/)
<?php use Illuminate\Database\Migrations\Migration; use Illuminate\Database\Schema\Blueprint; class CreatePlatformsTable extends Migration { /** * Run the migrations. * * @return void */ public function up() { Schema::create('platforms', function (Blueprint $table) { $table->increments('id'); $table->string('name'); $table->string('short_name'); $table->string('slug')->unique(); $table->string('logo'); $table->string('banner'); }); } /** * Reverse the migrations. * * @return void */ public function down() { Schema::drop('platforms'); } }
28 - 21 - (6 + 1) - -7 - 2
Calculate -6 - (-1 + (-5 - -26) + 0 + -6).
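Worked step by step, the two prompts above evaluate to:

$28 - 21 - (6 + 1) - (-7) - 2 = 7 - 7 + 7 - 2 = 5$

$-6 - (-1 + (-5 - (-26)) + 0 + (-6)) = -6 - (-1 + 21 + 0 - 6) = -6 - 14 = -20$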
// Returns how many days remain in the current week, treating Saturday as the last day.
// Date#getDay() runs 0 (Sunday) through 6 (Saturday), so a Saturday input yields 0.
function daysLeftThisWeek (date) { return 6 - date.getDay() }
module.exports = daysLeftThisWeek
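// Usage sketch (the require path is hypothetical; the dates are arbitrary examples):
const daysLeftThisWeek = require('./days-left-this-week')

console.log(daysLeftThisWeek(new Date(2023, 5, 7)))  // Wednesday: getDay() === 3, prints 3
console.log(daysLeftThisWeek(new Date(2023, 5, 10))) // Saturday: getDay() === 6, prints 0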
// Copyright (c) 2013-2015 The btcsuite developers // Copyright (c) 2015 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wire_test import ( "bytes" "io" "reflect" "testing" "time" "github.com/davecgh/go-spew/spew" "github.com/decred/dcrd/chaincfg/chainhash" "github.com/decred/dcrd/wire" "github.com/decred/dcrutil" ) // TestBlock tests the MsgBlock API. func TestBlock(t *testing.T) { pver := wire.ProtocolVersion // Test block header. bh := wire.NewBlockHeader( int32(pver), // Version &testBlock.Header.PrevBlock, // PrevHash &testBlock.Header.MerkleRoot, // MerkleRoot &testBlock.Header.StakeRoot, // StakeRoot uint16(0x0000), // VoteBits [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, // FinalState uint16(0x0000), // Voters uint8(0x00), // FreshStake uint8(0x00), // Revocations uint32(0), // Poolsize testBlock.Header.Bits, // Bits int64(0x0000000000000000), // Sbits uint32(1), // Height uint32(1), // Size testBlock.Header.Nonce, // Nonce [36]byte{}, // ExtraData ) // Ensure the command is expected value. wantCmd := "block" msg := wire.NewMsgBlock(bh) if cmd := msg.Command(); cmd != wantCmd { t.Errorf("NewMsgBlock: wrong command - got %v want %v", cmd, wantCmd) } // Ensure max payload is expected value for latest protocol version. // Num addresses (varInt) + max allowed addresses. wantPayload := uint32(1000000) maxPayload := msg.MaxPayloadLength(pver) if maxPayload != wantPayload { t.Errorf("MaxPayloadLength: wrong max payload length for "+ "protocol version %d - got %v, want %v", pver, maxPayload, wantPayload) } // Ensure we get the same block header data back out. if !reflect.DeepEqual(&msg.Header, bh) { t.Errorf("NewMsgBlock: wrong block header - got %v, want %v", spew.Sdump(&msg.Header), spew.Sdump(bh)) } // Ensure transactions are added properly. tx := testBlock.Transactions[0].Copy() msg.AddTransaction(tx) if !reflect.DeepEqual(msg.Transactions, testBlock.Transactions) { t.Errorf("AddTransaction: wrong transactions - got %v, want %v", spew.Sdump(msg.Transactions), spew.Sdump(testBlock.Transactions)) } // Ensure transactions are properly cleared. msg.ClearTransactions() if len(msg.Transactions) != 0 { t.Errorf("ClearTransactions: wrong transactions - got %v, want %v", len(msg.Transactions), 0) } // Ensure stake transactions are added properly. stx := testBlock.STransactions[0].Copy() msg.AddSTransaction(stx) if !reflect.DeepEqual(msg.STransactions, testBlock.STransactions) { t.Errorf("AddSTransaction: wrong transactions - got %v, want %v", spew.Sdump(msg.STransactions), spew.Sdump(testBlock.STransactions)) } // Ensure transactions are properly cleared. msg.ClearSTransactions() if len(msg.STransactions) != 0 { t.Errorf("ClearTransactions: wrong transactions - got %v, want %v", len(msg.STransactions), 0) } return } // TestBlockTxShas tests the ability to generate a slice of all transaction // hashes from a block accurately. func TestBlockTxShas(t *testing.T) { // Block 1, transaction 1 hash. hashStr := "55a25248c04dd8b6599ca2a708413c00d79ae90ce075c54e8a967a647d7e4bea" wantHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) return } wantShas := []chainhash.Hash{*wantHash} shas := testBlock.TxShas() if !reflect.DeepEqual(shas, wantShas) { t.Errorf("TxShas: wrong transaction hashes - got %v, want %v", spew.Sdump(shas), spew.Sdump(wantShas)) } } // TestBlockSTxShas tests the ability to generate a slice of all stake transaction // hashes from a block accurately. 
func TestBlockSTxShas(t *testing.T) { // Block 1, transaction 1 hash. hashStr := "ae208a69f3ee088d0328126e3d9bef7652b108d1904f27b166c5999233a801d4" wantHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) return } wantShas := []chainhash.Hash{*wantHash} shas := testBlock.STxShas() if !reflect.DeepEqual(shas, wantShas) { t.Errorf("STxShas: wrong transaction hashes - got %v, want %v", spew.Sdump(shas), spew.Sdump(wantShas)) } } // TestBlockSha tests the ability to generate the hash of a block accurately. func TestBlockSha(t *testing.T) { // Block 1 hash. hashStr := "152437dada95368c42b19febc1702939fa9c1ccdb6fd7284e5b7a19d8fe6df7a" wantHash, err := chainhash.NewHashFromStr(hashStr) if err != nil { t.Errorf("NewShaHashFromStr: %v", err) } // Ensure the hash produced is expected. blockHash := testBlock.BlockSha() if !blockHash.IsEqual(wantHash) { t.Errorf("BlockSha: wrong hash - got %v, want %v", spew.Sprint(blockHash), spew.Sprint(wantHash)) } } // TestBlockWire tests the MsgBlock wire encode and decode for various numbers // of transaction inputs and outputs and protocol versions. func TestBlockWire(t *testing.T) { tests := []struct { in *wire.MsgBlock // Message to encode out *wire.MsgBlock // Expected decoded message buf []byte // Wire encoding txLocs []wire.TxLoc // Expected transaction locations sTxLocs []wire.TxLoc // Expected stake transaction locations pver uint32 // Protocol version for wire encoding }{ // Latest protocol version. { &testBlock, &testBlock, testBlockBytes, testBlockTxLocs, testBlockSTxLocs, wire.ProtocolVersion, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Encode the message to wire format. var buf bytes.Buffer err := test.in.BtcEncode(&buf, test.pver) if err != nil { t.Errorf("BtcEncode #%d error %v", i, err) continue } if !bytes.Equal(buf.Bytes(), test.buf) { t.Errorf("BtcEncode #%d\n got: %s want: %s", i, spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) continue } // Decode the message from wire format. var msg wire.MsgBlock rbuf := bytes.NewReader(test.buf) err = msg.BtcDecode(rbuf, test.pver) if err != nil { t.Errorf("BtcDecode #%d error %v", i, err) continue } if !reflect.DeepEqual(&msg, test.out) { t.Errorf("BtcDecode #%d\n got: %s want: %s", i, spew.Sdump(&msg), spew.Sdump(test.out)) continue } } } // TestBlockWireErrors performs negative tests against wire encode and decode // of MsgBlock to confirm error paths work correctly. func TestBlockWireErrors(t *testing.T) { // Use protocol version 60002 specifically here instead of the latest // because the test data is using bytes encoded with that protocol // version. pver := uint32(60002) tests := []struct { in *wire.MsgBlock // Value to encode buf []byte // Wire encoding pver uint32 // Protocol version for wire encoding max int // Max size of fixed buffer to induce errors writeErr error // Expected write error readErr error // Expected read error }{ // Force error in version. {&testBlock, testBlockBytes, pver, 0, io.ErrShortWrite, io.EOF}, // 0 // Force error in prev block hash. {&testBlock, testBlockBytes, pver, 4, io.ErrShortWrite, io.EOF}, // 1 // Force error in merkle root. {&testBlock, testBlockBytes, pver, 36, io.ErrShortWrite, io.EOF}, // 2 // Force error in stake root. {&testBlock, testBlockBytes, pver, 68, io.ErrShortWrite, io.EOF}, // 3 // Force error in vote bits. {&testBlock, testBlockBytes, pver, 100, io.ErrShortWrite, io.EOF}, // 4 // Force error in finalState. 
{&testBlock, testBlockBytes, pver, 102, io.ErrShortWrite, io.EOF}, // 5 // Force error in voters. {&testBlock, testBlockBytes, pver, 108, io.ErrShortWrite, io.EOF}, // 6 // Force error in freshstake. {&testBlock, testBlockBytes, pver, 110, io.ErrShortWrite, io.EOF}, // 7 // Force error in revocations. {&testBlock, testBlockBytes, pver, 111, io.ErrShortWrite, io.EOF}, // 8 // Force error in poolsize. {&testBlock, testBlockBytes, pver, 112, io.ErrShortWrite, io.EOF}, // 9 // Force error in difficulty bits. {&testBlock, testBlockBytes, pver, 116, io.ErrShortWrite, io.EOF}, // 10 // Force error in stake difficulty bits. {&testBlock, testBlockBytes, pver, 120, io.ErrShortWrite, io.EOF}, // 11 // Force error in height. {&testBlock, testBlockBytes, pver, 128, io.ErrShortWrite, io.EOF}, // 12 // Force error in size. {&testBlock, testBlockBytes, pver, 132, io.ErrShortWrite, io.EOF}, // 13 // Force error in timestamp. {&testBlock, testBlockBytes, pver, 136, io.ErrShortWrite, io.EOF}, // 14 // Force error in nonce. {&testBlock, testBlockBytes, pver, 140, io.ErrShortWrite, io.EOF}, // 15 // Force error in tx count. {&testBlock, testBlockBytes, pver, 180, io.ErrShortWrite, io.EOF}, // 16 // Force error in tx. {&testBlock, testBlockBytes, pver, 181, io.ErrShortWrite, io.EOF}, // 17 } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Encode to wire format. w := newFixedWriter(test.max) err := test.in.BtcEncode(w, test.pver) if err != test.writeErr { t.Errorf("BtcEncode #%d wrong error got: %v, want: %v", i, err, test.writeErr) continue } // Decode from wire format. var msg wire.MsgBlock r := newFixedReader(test.max, test.buf) err = msg.BtcDecode(r, test.pver) if err != test.readErr { t.Errorf("BtcDecode #%d wrong error got: %v, want: %v", i, err, test.readErr) continue } } } // TestBlockSerialize tests MsgBlock serialize and deserialize. func TestBlockSerialize(t *testing.T) { tests := []struct { in *wire.MsgBlock // Message to encode out *wire.MsgBlock // Expected decoded message buf []byte // Serialized data txLocs []wire.TxLoc // Expected transaction locations sTxLocs []wire.TxLoc // Expected stake transaction locations }{ { &testBlock, &testBlock, testBlockBytes, testBlockTxLocs, testBlockSTxLocs, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Serialize the block. var buf bytes.Buffer err := test.in.Serialize(&buf) if err != nil { t.Errorf("Serialize #%d error %v", i, err) continue } if !bytes.Equal(buf.Bytes(), test.buf) { t.Errorf("Serialize #%d\n got: %s want: %s", i, spew.Sdump(buf.Bytes()), spew.Sdump(test.buf)) continue } // Deserialize the block. var block wire.MsgBlock rbuf := bytes.NewReader(test.buf) err = block.Deserialize(rbuf) if err != nil { t.Errorf("Deserialize #%d error %v", i, err) continue } if !reflect.DeepEqual(&block, test.out) { t.Errorf("Deserialize #%d\n got: %s want: %s", i, spew.Sdump(&block), spew.Sdump(test.out)) continue } // Deserialize the block while gathering transaction location // information. 
var txLocBlock wire.MsgBlock br := bytes.NewBuffer(test.buf) txLocs, sTxLocs, err := txLocBlock.DeserializeTxLoc(br) if err != nil { t.Errorf("DeserializeTxLoc #%d error %v", i, err) continue } if !reflect.DeepEqual(&txLocBlock, test.out) { t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i, spew.Sdump(&txLocBlock), spew.Sdump(test.out)) continue } if !reflect.DeepEqual(txLocs, test.txLocs) { t.Errorf("DeserializeTxLoc #%d\n got: %s want: %s", i, spew.Sdump(txLocs), spew.Sdump(test.txLocs)) continue } if !reflect.DeepEqual(sTxLocs, test.sTxLocs) { t.Errorf("DeserializeTxLoc, sTxLocs #%d\n got: %s want: %s", i, spew.Sdump(sTxLocs), spew.Sdump(test.sTxLocs)) continue } } } // TestBlockSerializeErrors performs negative tests against wire encode and // decode of MsgBlock to confirm error paths work correctly. func TestBlockSerializeErrors(t *testing.T) { tests := []struct { in *wire.MsgBlock // Value to encode buf []byte // Serialized data max int // Max size of fixed buffer to induce errors writeErr error // Expected write error readErr error // Expected read error }{ {&testBlock, testBlockBytes, 0, io.ErrShortWrite, io.EOF}, // 0 // Force error in prev block hash. {&testBlock, testBlockBytes, 4, io.ErrShortWrite, io.EOF}, // 1 // Force error in merkle root. {&testBlock, testBlockBytes, 36, io.ErrShortWrite, io.EOF}, // 2 // Force error in stake root. {&testBlock, testBlockBytes, 68, io.ErrShortWrite, io.EOF}, // 3 // Force error in vote bits. {&testBlock, testBlockBytes, 100, io.ErrShortWrite, io.EOF}, // 4 // Force error in finalState. {&testBlock, testBlockBytes, 102, io.ErrShortWrite, io.EOF}, // 5 // Force error in voters. {&testBlock, testBlockBytes, 108, io.ErrShortWrite, io.EOF}, // 8 // Force error in freshstake. {&testBlock, testBlockBytes, 110, io.ErrShortWrite, io.EOF}, // 9 // Force error in revocations. {&testBlock, testBlockBytes, 111, io.ErrShortWrite, io.EOF}, // 10 // Force error in poolsize. {&testBlock, testBlockBytes, 112, io.ErrShortWrite, io.EOF}, // 11 // Force error in difficulty bits. {&testBlock, testBlockBytes, 116, io.ErrShortWrite, io.EOF}, // 12 // Force error in stake difficulty bits. {&testBlock, testBlockBytes, 120, io.ErrShortWrite, io.EOF}, // 13 // Force error in height. {&testBlock, testBlockBytes, 128, io.ErrShortWrite, io.EOF}, // 14 // Force error in size. {&testBlock, testBlockBytes, 132, io.ErrShortWrite, io.EOF}, // 15 // Force error in timestamp. {&testBlock, testBlockBytes, 136, io.ErrShortWrite, io.EOF}, // 16 // Force error in nonce. {&testBlock, testBlockBytes, 140, io.ErrShortWrite, io.EOF}, // 17 // Force error in tx count. {&testBlock, testBlockBytes, 180, io.ErrShortWrite, io.EOF}, // 18 // Force error in tx. {&testBlock, testBlockBytes, 181, io.ErrShortWrite, io.EOF}, // 19 } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Serialize the block. w := newFixedWriter(test.max) err := test.in.Serialize(w) if err != test.writeErr { t.Errorf("Serialize #%d wrong error got: %v, want: %v", i, err, test.writeErr) continue } // Deserialize the block. 
var block wire.MsgBlock r := newFixedReader(test.max, test.buf) err = block.Deserialize(r) if err != test.readErr { t.Errorf("Deserialize #%d wrong error got: %v, want: %v", i, err, test.readErr) continue } var txLocBlock wire.MsgBlock br := bytes.NewBuffer(test.buf[0:test.max]) _, _, err = txLocBlock.DeserializeTxLoc(br) if err != test.readErr { t.Errorf("DeserializeTxLoc #%d wrong error got: %v, want: %v", i, err, test.readErr) continue } } } // TestBlockOverflowErrors performs tests to ensure deserializing blocks which // are intentionally crafted to use large values for the number of transactions // are handled properly. This could otherwise potentially be used as an attack // vector. func TestBlockOverflowErrors(t *testing.T) { // Use protocol version 70001 specifically here instead of the latest // protocol version because the test data is using bytes encoded with // that version. pver := uint32(1) tests := []struct { buf []byte // Wire encoding pver uint32 // Protocol version for wire encoding err error // Expected error }{ // Block that claims to have ~uint64(0) transactions. { []byte{ 0x01, 0x00, 0x00, 0x00, // Version 1 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // StakeRoot 0x00, 0x00, // VoteBits 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState 0x00, 0x00, // Voters 0x00, // FreshStake 0x00, // Revocations 0x00, 0x00, 0x00, 0x00, // Poolsize 0xff, 0xff, 0x00, 0x1d, // Bits 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits 0x01, 0x00, 0x00, 0x00, // Height 0x01, 0x00, 0x00, 0x00, // Size 0x61, 0xbc, 0x66, 0x49, // Timestamp 0x01, 0xe3, 0x62, 0x99, // Nonce 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ExtraData 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // TxnCount }, pver, &wire.MessageError{}, }, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { // Decode from wire format. var msg wire.MsgBlock r := bytes.NewReader(test.buf) err := msg.BtcDecode(r, test.pver) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("BtcDecode #%d wrong error got: %v, want: %v", i, err, reflect.TypeOf(test.err)) continue } // Deserialize from wire format. r = bytes.NewReader(test.buf) err = msg.Deserialize(r) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("Deserialize #%d wrong error got: %v, want: %v", i, err, reflect.TypeOf(test.err)) continue } // Deserialize with transaction location info from wire format. br := bytes.NewBuffer(test.buf) _, _, err = msg.DeserializeTxLoc(br) if reflect.TypeOf(err) != reflect.TypeOf(test.err) { t.Errorf("DeserializeTxLoc #%d wrong error got: %v, "+ "want: %v", i, err, reflect.TypeOf(test.err)) continue } } } // TestBlockSerializeSize performs tests to ensure the serialize size for // various blocks is accurate. 
func TestBlockSerializeSize(t *testing.T) { // Block with no transactions. noTxBlock := wire.NewMsgBlock(&testBlock.Header) tests := []struct { in *wire.MsgBlock // Block to encode size int // Expected serialized size }{ // Block with no transactions (header + 2x numtx) {noTxBlock, 182}, // First block in the mainnet block chain. {&testBlock, len(testBlockBytes)}, } t.Logf("Running %d tests", len(tests)) for i, test := range tests { serializedSize := test.in.SerializeSize() if serializedSize != test.size { t.Errorf("MsgBlock.SerializeSize: #%d got: %d, want: "+ "%d", i, serializedSize, test.size) continue } } } // testBlock is a basic normative block that is used throughout tests. var testBlock = wire.MsgBlock{ Header: wire.BlockHeader{ Version: 1, PrevBlock: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, }), MerkleRoot: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, }), StakeRoot: chainhash.Hash([chainhash.HashSize]byte{ // Make go vet happy. 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, }), VoteBits: uint16(0x0000), FinalState: [6]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, Voters: uint16(0x0000), FreshStake: uint8(0x00), Revocations: uint8(0x00), PoolSize: uint32(0x00000000), // Poolsize Bits: 0x1d00ffff, // 486604799 SBits: int64(0x0000000000000000), Height: uint32(1), Size: uint32(1), Timestamp: time.Unix(0x4966bc61, 0), // 2009-01-08 20:54:25 -0600 CST Nonce: 0x9962e301, // 2573394689 ExtraData: [36]byte{}, }, Transactions: []*wire.MsgTx{ { Version: 1, TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ Hash: chainhash.Hash{}, Index: 0xffffffff, Tree: dcrutil.TxTreeRegular, }, Sequence: 0xffffffff, ValueIn: 0x1616161616161616, BlockHeight: 0x17171717, BlockIndex: 0x18181818, SignatureScript: []byte{ 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, }, }, }, TxOut: []*wire.TxOut{ { Value: 0x3333333333333333, Version: 0x9898, PkScript: []byte{ 0x41, // OP_DATA_65 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, 0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16, 0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c, 0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c, 0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4, 0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6, 0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e, 0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58, 0xee, // 65-byte signature 0xac, // OP_CHECKSIG }, }, }, LockTime: 0x11111111, Expiry: 0x22222222, }, }, STransactions: []*wire.MsgTx{ { Version: 1, TxIn: []*wire.TxIn{ { PreviousOutPoint: wire.OutPoint{ Hash: chainhash.Hash{}, Index: 0xffffffff, Tree: dcrutil.TxTreeStake, }, Sequence: 0xffffffff, ValueIn: 0x1313131313131313, BlockHeight: 0x14141414, BlockIndex: 0x15151515, SignatureScript: []byte{ 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, }, }, }, TxOut: []*wire.TxOut{ { Value: 0x3333333333333333, Version: 0x1212, PkScript: []byte{ 0x41, // OP_DATA_65 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, 0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16, 0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c, 0x66, 0xfb, 
0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c, 0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4, 0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6, 0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e, 0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58, 0xee, // 65-byte signature 0xac, // OP_CHECKSIG }, }, }, LockTime: 0x11111111, Expiry: 0x22222222, }, }, } // testBlockBytes is the serialized bytes for the above test block (testBlock). var testBlockBytes = []byte{ // Begin block header 0x01, 0x00, 0x00, 0x00, // Version 1 [0] 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72, 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f, 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c, 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, // PrevBlock [4] 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // MerkleRoot [36] 0x98, 0x20, 0x51, 0xfd, 0x1e, 0x4b, 0xa7, 0x44, 0xbb, 0xbe, 0x68, 0x0e, 0x1f, 0xee, 0x14, 0x67, 0x7b, 0xa1, 0xa3, 0xc3, 0x54, 0x0b, 0xf7, 0xb1, 0xcd, 0xb6, 0x06, 0xe8, 0x57, 0x23, 0x3e, 0x0e, // StakeRoot [68] 0x00, 0x00, // VoteBits [100] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // FinalState [102] 0x00, 0x00, // Voters [108] 0x00, // FreshStake [110] 0x00, // Revocations [111] 0x00, 0x00, 0x00, 0x00, // Poolsize [112] 0xff, 0xff, 0x00, 0x1d, // Bits [116] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SBits [120] 0x01, 0x00, 0x00, 0x00, // Height [128] 0x01, 0x00, 0x00, 0x00, // Size [132] 0x61, 0xbc, 0x66, 0x49, // Timestamp [136] 0x01, 0xe3, 0x62, 0x99, // Nonce [140] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // ExtraData [144] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Announce number of txs 0x01, // TxnCount [180] // Begin bogus normal txs 0x01, 0x00, 0x00, 0x00, // Version [181] 0x01, // Varint for number of transaction inputs [185] 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash [186] 0xff, 0xff, 0xff, 0xff, // Prevous output index [218] 0x00, // Previous output tree [222] 0xff, 0xff, 0xff, 0xff, // Sequence [223] 0x01, // Varint for number of transaction outputs [227] 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // Transaction amount [228] 0x98, 0x98, // Script version 0x43, // Varint for length of pk script 0x41, // OP_DATA_65 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, 0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16, 0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c, 0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c, 0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4, 0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6, 0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e, 0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58, 0xee, // 65-byte signature 0xac, // OP_CHECKSIG 0x11, 0x11, 0x11, 0x11, // Lock time 0x22, 0x22, 0x22, 0x22, // Expiry 0x01, // Varint for number of signatures 0x16, 0x16, 0x16, 0x16, 0x16, 0x16, 0x16, 0x16, // ValueIn 0x17, 0x17, 0x17, 0x17, // BlockHeight 0x18, 0x18, 0x18, 0x18, // BlockIndex 0x07, // SigScript length 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, // Signature script (coinbase) // Announce number of stake txs 0x01, // TxnCount for stake tx // Begin bogus stake txs 0x01, 0x00, 0x00, 0x00, // Version 0x01, // Varint for number of transaction inputs 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Previous output hash 0xff, 0xff, 0xff, 0xff, // Prevous output index 0x01, // Previous output tree 0xff, 0xff, 0xff, 0xff, // Sequence 0x01, // Varint for number of transaction outputs 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, // Transaction amount 0x12, 0x12, // Script version 0x43, // Varint for length of pk script 0x41, // OP_DATA_65 0x04, 0x96, 0xb5, 0x38, 0xe8, 0x53, 0x51, 0x9c, 0x72, 0x6a, 0x2c, 0x91, 0xe6, 0x1e, 0xc1, 0x16, 0x00, 0xae, 0x13, 0x90, 0x81, 0x3a, 0x62, 0x7c, 0x66, 0xfb, 0x8b, 0xe7, 0x94, 0x7b, 0xe6, 0x3c, 0x52, 0xda, 0x75, 0x89, 0x37, 0x95, 0x15, 0xd4, 0xe0, 0xa6, 0x04, 0xf8, 0x14, 0x17, 0x81, 0xe6, 0x22, 0x94, 0x72, 0x11, 0x66, 0xbf, 0x62, 0x1e, 0x73, 0xa8, 0x2c, 0xbf, 0x23, 0x42, 0xc8, 0x58, 0xee, // 65-byte signature 0xac, // OP_CHECKSIG 0x11, 0x11, 0x11, 0x11, // Lock time 0x22, 0x22, 0x22, 0x22, // Expiry 0x01, // Varint for number of signatures 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, // ValueIn 0x14, 0x14, 0x14, 0x14, // BlockHeight 0x15, 0x15, 0x15, 0x15, // BlockIndex 0x07, // SigScript length 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0xf2, // Signature script (coinbase) } // Transaction location information for the test block transactions. var testBlockTxLocs = []wire.TxLoc{ {TxStart: 181, TxLen: 158}, } // Transaction location information for the test block stake transactions. var testBlockSTxLocs = []wire.TxLoc{ {TxStart: 340, TxLen: 158}, }
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1"> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="author" content="Ginolhac"> <meta name="generator" content="Hugo 0.42.1" /> <title>Posts &middot; Aurélien&#39; blog</title> <link rel="shortcut icon" href="//ginolhac.github.io/images/favicon.ico"> <link rel="stylesheet" href="//ginolhac.github.io/css/style.css"> <link rel="stylesheet" href="//ginolhac.github.io/css/highlight.css"> <link href="//ginolhac.github.io/styles/github.min.css" rel="stylesheet"> <link rel="stylesheet" href="//ginolhac.github.io/css/font-awesome.min.css"> </head> <body> <nav class="main-nav"> <a href='//ginolhac.github.io/'> <span class="arrow">←</span>Home</a> <a href='//ginolhac.github.io/posts'>Archive</a> <a href='//ginolhac.github.io/karate'>Karate</a> <a href='//ginolhac.github.io/tags'>Tags</a> <a href='//ginolhac.github.io/about'>About</a> </nav> <div class="profile"> <section id="wrapper"> <header id="header"> <a href='//ginolhac.github.io/about'> <img id="avatar" class="2x" src="//ginolhac.github.io/images/avatar.png"/> </a> <h1>Aurélien&#39; blog</h1> <h2>bioinformatic and data science</h2> </header> </section> </div> <section id="wrapper" class="home"> <div class="archive"> <h3>2018</h3> <ul> <div class="post-item"> <div class="post-time">Mar 26</div> <a href="//ginolhac.github.io/posts/latex-modern-cv/" class="post-link"> LaTex modern CV </a> </div> <div class="post-item"> <div class="post-time">Jan 27</div> <a href="//ginolhac.github.io/posts/diy-raspberry-monitored-via-telegram/" class="post-link"> home surveillance monitored via telegram </a> </div> </ul> </div> <div class="archive"> <h3>2016</h3> <ul> <div class="post-item"> <div class="post-time">Dec 8</div> <a href="//ginolhac.github.io/posts/tweening-a-poisson-distribution/" class="post-link"> tweening a Poisson distribution </a> </div> <div class="post-item"> <div class="post-time">Jul 31</div> <a href="//ginolhac.github.io/posts/teething-process/" class="post-link"> teething </a> </div> </ul> </div> <div class="archive"> <h3>2015</h3> <ul> <div class="post-item"> <div class="post-time">Jan 25</div> <a href="//ginolhac.github.io/posts/winter-is-coming/" class="post-link"> winter is coming </a> </div> </ul> </div> <footer id="footer"> <div id="social"> <a class="symbol" href="https://github.com/ginolhac"> <i class="fa fa-github-square"></i> </a> <a class="symbol" href="https://www.linkedin.com/in/aur%c3%a9lien-ginolhac-07b33b92/"> <i class="fa fa-linkedin-square"></i> </a> <a class="symbol" href="https://twitter.com/kingsushigino"> <i class="fa fa-twitter-square"></i> </a> </div> <p class="small"> © Copyright 2021 <i class="fa fa-heart" aria-hidden="true"></i> Ginolhac </p> <p class="small"> Powered by <a href="//www.gohugo.io/">Hugo</a> Theme By <a href="https://github.com/nodejh/hugo-theme-cactus-plus">nodejh</a> </p> <script src="//yihui.name/js/math-code.js"></script> <script async src="//cdn.bootcss.com/mathjax/2.7.1/MathJax.js?config=TeX-MML-AM_CHTML"> </script> <script src="//ginolhac.github.io/highlight.min.js"></script> <script src="//ginolhac.github.io/languages/r.min.js"></script> <script> hljs.configure({languages: []}); hljs.initHighlightingOnLoad(); </script> </footer> </section> <div class="dd"> </div> <script src="//ginolhac.github.io/js/jquery-3.3.1.min.js"></script> <script src="//ginolhac.github.io/js/main.js"></script> <script 
src="//ginolhac.github.io/js/highlight.min.js"></script> <script>hljs.initHighlightingOnLoad();</script> <script> var doNotTrack = false; if (!doNotTrack) { (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) })(window,document,'script','https://www.google-analytics.com/analytics.js','ga'); ga('create', 'UA-29962051-1', 'auto'); ga('send', 'pageview'); } </script> </body> </html>
Evaluate -20 + (54 - 15) + -14.
What is the value of -20 - (-4 + 12 - 17)?
0 - (-9 + 10 + 2 + -14)
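Evaluating the three prompts above left to right:

$-20 + (54 - 15) + (-14) = -20 + 39 - 14 = 5$

$-20 - (-4 + 12 - 17) = -20 - (-9) = -11$

$0 - (-9 + 10 + 2 + (-14)) = 0 - (-11) = 11$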
name 'google_app_engine' description 'A cookbook to download and install the google app engine SDK on a Linux system.' version '1.0.0' maintainer 'Bernd Hoffmann' maintainer_email '[email protected]' license 'MIT' long_description IO.read(File.join(File.dirname(__FILE__), 'README.md'))
# gulp boilerplate

Run `npm start`, then open another terminal and run `gulp watch`; change some files and browser-sync will reload the page.

## Gulp tasks

* gulp
* gulp prod
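The README names the tasks but not their contents, so here is a minimal sketch of what a matching `gulpfile.js` could look like. The source globs, the browser-sync wiring, and the idea that `prod` would add minification are illustrative assumptions, not this project's actual build file:

```js
// gulpfile.js — illustrative sketch only; paths and plugins are assumptions.
const gulp = require('gulp');
const browserSync = require('browser-sync').create();

// Hypothetical source layout.
const paths = { src: 'src/**/*', dist: 'dist' };

// Copy sources to dist (a real build would transform them here).
function build() {
  return gulp.src(paths.src).pipe(gulp.dest(paths.dist));
}

// Serve dist and rebuild + reload on change — the `gulp watch` task.
function watch() {
  browserSync.init({ server: { baseDir: paths.dist } });
  gulp.watch(paths.src, build).on('change', browserSync.reload);
}

exports.default = build; // `gulp`
exports.prod = build;    // `gulp prod` — would typically add minification on top
exports.watch = watch;   // `gulp watch`
```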
/* * DISTRHO Plugin Framework (DPF) * Copyright (C) 2012-2014 Filipe Coelho <[email protected]> * * Permission to use, copy, modify, and/or distribute this software for any purpose with * or without fee is hereby granted, provided that the above copyright notice and this * permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD * TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER * IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "DistrhoPluginInternal.hpp" #include "lv2/atom.h" #include "lv2/buf-size.h" #include "lv2/data-access.h" #include "lv2/instance-access.h" #include "lv2/midi.h" #include "lv2/options.h" #include "lv2/port-props.h" #include "lv2/resize-port.h" #include "lv2/state.h" #include "lv2/time.h" #include "lv2/ui.h" #include "lv2/units.h" #include "lv2/urid.h" #include "lv2/worker.h" #include "lv2/lv2_kxstudio_properties.h" #include "lv2/lv2_programs.h" #include <fstream> #include <iostream> #ifndef DISTRHO_PLUGIN_URI # error DISTRHO_PLUGIN_URI undefined! #endif #ifndef DISTRHO_PLUGIN_MINIMUM_BUFFER_SIZE # define DISTRHO_PLUGIN_MINIMUM_BUFFER_SIZE 2048 #endif #define DISTRHO_LV2_USE_EVENTS_IN (DISTRHO_PLUGIN_HAS_MIDI_INPUT || DISTRHO_PLUGIN_WANT_TIMEPOS || (DISTRHO_PLUGIN_WANT_STATE && DISTRHO_PLUGIN_HAS_UI)) #define DISTRHO_LV2_USE_EVENTS_OUT (DISTRHO_PLUGIN_HAS_MIDI_OUTPUT || (DISTRHO_PLUGIN_WANT_STATE && DISTRHO_PLUGIN_HAS_UI)) // ----------------------------------------------------------------------- DISTRHO_PLUGIN_EXPORT void lv2_generate_ttl(const char* const basename) { USE_NAMESPACE_DISTRHO // Dummy plugin to get data from d_lastBufferSize = 512; d_lastSampleRate = 44100.0; PluginExporter plugin; d_lastBufferSize = 0; d_lastSampleRate = 0.0; d_string pluginDLL(basename); d_string pluginTTL(pluginDLL + ".ttl"); // --------------------------------------------- { std::cout << "Writing manifest.ttl..."; std::cout.flush(); std::fstream manifestFile("manifest.ttl", std::ios::out); d_string manifestString; manifestString += "@prefix lv2: <" LV2_CORE_PREFIX "> .\n"; manifestString += "@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n"; #if DISTRHO_PLUGIN_HAS_UI manifestString += "@prefix ui: <" LV2_UI_PREFIX "> .\n"; #endif manifestString += "\n"; manifestString += "<" DISTRHO_PLUGIN_URI ">\n"; manifestString += " a lv2:Plugin ;\n"; manifestString += " lv2:binary <" + pluginDLL + "." DISTRHO_DLL_EXTENSION "> ;\n"; manifestString += " rdfs:seeAlso <" + pluginTTL + "> .\n"; manifestString += "\n"; #if DISTRHO_PLUGIN_HAS_UI manifestString += "<" DISTRHO_UI_URI ">\n"; # if DISTRHO_OS_HAIKU manifestString += " a ui:BeUI ;\n"; # elif DISTRHO_OS_MAC manifestString += " a ui:CocoaUI ;\n"; # elif DISTRHO_OS_WINDOWS manifestString += " a ui:WindowsUI ;\n"; # else manifestString += " a ui:X11UI ;\n"; # endif # if ! DISTRHO_PLUGIN_WANT_DIRECT_ACCESS d_string pluginUI(pluginDLL); pluginUI.truncate(pluginDLL.rfind("_dsp")); pluginUI += "_ui"; manifestString += " ui:binary <" + pluginUI + "." DISTRHO_DLL_EXTENSION "> ;\n"; # else manifestString += " ui:binary <" + pluginDLL + "." 
DISTRHO_DLL_EXTENSION "> ;\n"; #endif manifestString += " lv2:extensionData ui:idleInterface ,\n"; # if DISTRHO_PLUGIN_WANT_PROGRAMS manifestString += " ui:showInterface ,\n"; manifestString += " <" LV2_PROGRAMS__Interface "> ;\n"; # else manifestString += " ui:showInterface ;\n"; # endif manifestString += " lv2:optionalFeature ui:noUserResize ,\n"; manifestString += " ui:resize ,\n"; manifestString += " ui:touch ;\n"; # if DISTRHO_PLUGIN_WANT_DIRECT_ACCESS manifestString += " lv2:requiredFeature <" LV2_DATA_ACCESS_URI "> ,\n"; manifestString += " <" LV2_INSTANCE_ACCESS_URI "> ,\n"; manifestString += " <" LV2_OPTIONS__options "> ,\n"; # else manifestString += " lv2:requiredFeature <" LV2_OPTIONS__options "> ,\n"; # endif manifestString += " <" LV2_URID__map "> .\n"; #endif manifestFile << manifestString << std::endl; manifestFile.close(); std::cout << " done!" << std::endl; } // --------------------------------------------- { std::cout << "Writing " << pluginTTL << "..."; std::cout.flush(); std::fstream pluginFile(pluginTTL, std::ios::out); d_string pluginString; // header #if DISTRHO_LV2_USE_EVENTS_IN pluginString += "@prefix atom: <" LV2_ATOM_PREFIX "> .\n"; #endif pluginString += "@prefix doap: <http://usefulinc.com/ns/doap#> .\n"; pluginString += "@prefix foaf: <http://xmlns.com/foaf/0.1/> .\n"; pluginString += "@prefix lv2: <" LV2_CORE_PREFIX "> .\n"; pluginString += "@prefix rsz: <" LV2_RESIZE_PORT_PREFIX "> .\n"; #if DISTRHO_PLUGIN_HAS_UI pluginString += "@prefix ui: <" LV2_UI_PREFIX "> .\n"; #endif pluginString += "@prefix unit: <" LV2_UNITS_PREFIX "> .\n"; pluginString += "\n"; // plugin pluginString += "<" DISTRHO_PLUGIN_URI ">\n"; #if DISTRHO_PLUGIN_IS_SYNTH pluginString += " a lv2:InstrumentPlugin, lv2:Plugin ;\n"; #else pluginString += " a lv2:Plugin ;\n"; #endif pluginString += "\n"; // extensionData pluginString += " lv2:extensionData <" LV2_STATE__interface "> "; #if DISTRHO_PLUGIN_WANT_STATE pluginString += ",\n <" LV2_OPTIONS__interface "> "; pluginString += ",\n <" LV2_WORKER__interface "> "; #endif #if DISTRHO_PLUGIN_WANT_PROGRAMS pluginString += ",\n <" LV2_PROGRAMS__Interface "> "; #endif pluginString += ";\n\n"; // optionalFeatures #if DISTRHO_PLUGIN_IS_RT_SAFE pluginString += " lv2:optionalFeature <" LV2_CORE__hardRTCapable "> ,\n"; pluginString += " <" LV2_BUF_SIZE__boundedBlockLength "> ;\n"; #else pluginString += " lv2:optionalFeature <" LV2_BUF_SIZE__boundedBlockLength "> ;\n"; #endif pluginString += "\n"; // requiredFeatures pluginString += " lv2:requiredFeature <" LV2_OPTIONS__options "> "; pluginString += ",\n <" LV2_URID__map "> "; #if DISTRHO_PLUGIN_WANT_STATE pluginString += ",\n <" LV2_WORKER__schedule "> "; #endif pluginString += ";\n\n"; // UI #if DISTRHO_PLUGIN_HAS_UI pluginString += " ui:ui <" DISTRHO_UI_URI "> ;\n"; pluginString += "\n"; #endif { uint32_t portIndex = 0; #if DISTRHO_PLUGIN_NUM_INPUTS > 0 for (uint32_t i=0; i < DISTRHO_PLUGIN_NUM_INPUTS; ++i, ++portIndex) { if (i == 0) pluginString += " lv2:port [\n"; else pluginString += " [\n"; pluginString += " a lv2:InputPort, lv2:AudioPort ;\n"; pluginString += " lv2:index " + d_string(portIndex) + " ;\n"; pluginString += " lv2:symbol \"lv2_audio_in_" + d_string(i+1) + "\" ;\n"; pluginString += " lv2:name \"Audio Input " + d_string(i+1) + "\" ;\n"; if (i+1 == DISTRHO_PLUGIN_NUM_INPUTS) pluginString += " ] ;\n\n"; else pluginString += " ] ,\n"; } pluginString += "\n"; #endif #if DISTRHO_PLUGIN_NUM_OUTPUTS > 0 for (uint32_t i=0; i < DISTRHO_PLUGIN_NUM_OUTPUTS; ++i, ++portIndex) { if (i == 0) 
pluginString += " lv2:port [\n"; else pluginString += " [\n"; pluginString += " a lv2:OutputPort, lv2:AudioPort ;\n"; pluginString += " lv2:index " + d_string(portIndex) + " ;\n"; pluginString += " lv2:symbol \"lv2_audio_out_" + d_string(i+1) + "\" ;\n"; pluginString += " lv2:name \"Audio Output " + d_string(i+1) + "\" ;\n"; if (i+1 == DISTRHO_PLUGIN_NUM_OUTPUTS) pluginString += " ] ;\n\n"; else pluginString += " ] ,\n"; } pluginString += "\n"; #endif #if DISTRHO_LV2_USE_EVENTS_IN pluginString += " lv2:port [\n"; pluginString += " a lv2:InputPort, atom:AtomPort ;\n"; pluginString += " lv2:index " + d_string(portIndex) + " ;\n"; pluginString += " lv2:name \"Events Input\" ;\n"; pluginString += " lv2:symbol \"lv2_events_in\" ;\n"; pluginString += " rsz:minimumSize " + d_string(DISTRHO_PLUGIN_MINIMUM_BUFFER_SIZE) + " ;\n"; pluginString += " atom:bufferType atom:Sequence ;\n"; # if (DISTRHO_PLUGIN_WANT_STATE && DISTRHO_PLUGIN_HAS_UI) pluginString += " atom:supports <" LV2_ATOM__String "> ;\n"; # endif # if DISTRHO_PLUGIN_HAS_MIDI_INPUT pluginString += " atom:supports <" LV2_MIDI__MidiEvent "> ;\n"; # endif # if DISTRHO_PLUGIN_WANT_TIMEPOS pluginString += " atom:supports <" LV2_TIME__Position "> ;\n"; # endif pluginString += " ] ;\n\n"; ++portIndex; #endif #if DISTRHO_LV2_USE_EVENTS_OUT pluginString += " lv2:port [\n"; pluginString += " a lv2:OutputPort, atom:AtomPort ;\n"; pluginString += " lv2:index " + d_string(portIndex) + " ;\n"; pluginString += " lv2:name \"Events Output\" ;\n"; pluginString += " lv2:symbol \"lv2_events_out\" ;\n"; pluginString += " rsz:minimumSize " + d_string(DISTRHO_PLUGIN_MINIMUM_BUFFER_SIZE) + " ;\n"; pluginString += " atom:bufferType atom:Sequence ;\n"; # if (DISTRHO_PLUGIN_WANT_STATE && DISTRHO_PLUGIN_HAS_UI) pluginString += " atom:supports <" LV2_ATOM__String "> ;\n"; # endif # if DISTRHO_PLUGIN_HAS_MIDI_OUTPUT pluginString += " atom:supports <" LV2_MIDI__MidiEvent "> ;\n"; # endif pluginString += " ] ;\n\n"; ++portIndex; #endif #if DISTRHO_PLUGIN_WANT_LATENCY pluginString += " lv2:port [\n"; pluginString += " a lv2:OutputPort, lv2:ControlPort ;\n"; pluginString += " lv2:index " + d_string(portIndex) + " ;\n"; pluginString += " lv2:name \"Latency\" ;\n"; pluginString += " lv2:symbol \"lv2_latency\" ;\n"; pluginString += " lv2:designation lv2:latency ;\n"; pluginString += " lv2:portProperty lv2:reportsLatency, lv2:integer ;\n"; pluginString += " ] ;\n\n"; ++portIndex; #endif for (uint32_t i=0, count=plugin.getParameterCount(); i < count; ++i, ++portIndex) { if (i == 0) pluginString += " lv2:port [\n"; else pluginString += " [\n"; if (plugin.isParameterOutput(i)) pluginString += " a lv2:OutputPort, lv2:ControlPort ;\n"; else pluginString += " a lv2:InputPort, lv2:ControlPort ;\n"; pluginString += " lv2:index " + d_string(portIndex) + " ;\n"; pluginString += " lv2:name \"" + plugin.getParameterName(i) + "\" ;\n"; // symbol { d_string symbol(plugin.getParameterSymbol(i)); if (symbol.isEmpty()) symbol = "lv2_port_" + d_string(portIndex-1); pluginString += " lv2:symbol \"" + symbol + "\" ;\n"; } // ranges { const ParameterRanges& ranges(plugin.getParameterRanges(i)); if (plugin.getParameterHints(i) & kParameterIsInteger) { pluginString += " lv2:default " + d_string(int(plugin.getParameterValue(i))) + " ;\n"; pluginString += " lv2:minimum " + d_string(int(ranges.min)) + " ;\n"; pluginString += " lv2:maximum " + d_string(int(ranges.max)) + " ;\n"; } else { pluginString += " lv2:default " + d_string(plugin.getParameterValue(i)) + " ;\n"; pluginString += " lv2:minimum " + 
d_string(ranges.min) + " ;\n"; pluginString += " lv2:maximum " + d_string(ranges.max) + " ;\n"; } } // unit { const d_string& unit(plugin.getParameterUnit(i)); if (! unit.isEmpty()) { if (unit == "db" || unit == "dB") { pluginString += " unit:unit unit:db ;\n"; } else if (unit == "hz" || unit == "Hz") { pluginString += " unit:unit unit:hz ;\n"; } else if (unit == "khz" || unit == "kHz") { pluginString += " unit:unit unit:khz ;\n"; } else if (unit == "mhz" || unit == "mHz") { pluginString += " unit:unit unit:mhz ;\n"; } else if (unit == "%") { pluginString += " unit:unit unit:pc ;\n"; } else { pluginString += " unit:unit [\n"; pluginString += " a unit:Unit ;\n"; pluginString += " unit:name \"" + unit + "\" ;\n"; pluginString += " unit:symbol \"" + unit + "\" ;\n"; pluginString += " unit:render \"%f " + unit + "\" ;\n"; pluginString += " ] ;\n"; } } } // hints { const uint32_t hints(plugin.getParameterHints(i)); if (hints & kParameterIsBoolean) pluginString += " lv2:portProperty lv2:toggled ;\n"; if (hints & kParameterIsInteger) pluginString += " lv2:portProperty lv2:integer ;\n"; if (hints & kParameterIsLogarithmic) pluginString += " lv2:portProperty <" LV2_PORT_PROPS__logarithmic "> ;\n"; if ((hints & kParameterIsAutomable) == 0 && ! plugin.isParameterOutput(i)) { pluginString += " lv2:portProperty <" LV2_PORT_PROPS__expensive "> ,\n"; pluginString += " <" LV2_KXSTUDIO_PROPERTIES__NonAutomable "> ;\n"; } } if (i+1 == count) pluginString += " ] ;\n\n"; else pluginString += " ] ,\n"; } } pluginString += " doap:name \"" + d_string(plugin.getName()) + "\" ;\n"; pluginString += " doap:maintainer [ foaf:name \"" + d_string(plugin.getMaker()) + "\" ] .\n"; pluginFile << pluginString << std::endl; pluginFile.close(); std::cout << " done!" << std::endl; } }
-6 - (2 - 10 - -4 - -2)
Calculate 2 + -6 + (2 - 3) + -5 + 4.
What is the value of (-7 - 5) + 18 + (0 - 4 - -2)?
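Worked step by step, the three prompts above evaluate to:

$-6 - (2 - 10 - (-4) - (-2)) = -6 - (-2) = -4$

$2 + (-6) + (2 - 3) + (-5) + 4 = 2 - 6 - 1 - 5 + 4 = -6$

$(-7 - 5) + 18 + (0 - 4 - (-2)) = -12 + 18 + (-2) = 4$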
var config = require('./config');
var express = require('express');
var superagent = require('superagent');

/**
 * Auth Token
 */

var authToken = null;
var expires = 0;
var expires_in = 20160; // 14 days in minutes

/**
 * Urls
 */

var OAUTH = 'https://www.arcgis.com/sharing/oauth2/token';
var GEOCODE = 'http://geocode.arcgis.com/arcgis/rest/services/World/GeocodeServer';

/**
 * ESRI Query Parameter Defaults
 */

var CATEGORY = 'Address';
var CENTER = config.geocode.center;
var DISTANCE = 160 * 1000; // meters

/**
 * Expose `router`
 */

var router = module.exports = express.Router();

/**
 * Expose `encode` & `reverse`
 */

module.exports.encode = encode;
module.exports.reverse = reverse;
module.exports.suggest = suggest;

/**
 * Geocode
 */

router.get('/:address', function(req, res) {
  encode(req.params.address, function(err, addresses) {
    if (err) {
      console.error(err);
      res.status(400).send(err);
    } else {
      var ll = addresses[0].feature.geometry;
      res.status(200).send({
        lng: ll.x,
        lat: ll.y
      });
    }
  });
});

/**
 * Reverse
 */

router.get('/reverse/:coordinate', function(req, res) {
  reverse(req.params.coordinate, function(err, address) {
    if (err) {
      console.error(err);
      res.status(400).send(err);
    } else {
      res.status(200).send(address);
    }
  });
});

/**
 * Suggest
 */

router.get('/suggest/:text', function(req, res) {
  suggest(req.params.text, function(err, suggestions) {
    if (err) {
      console.error(err);
      res.status(400).send(err);
    } else {
      res.status(200).send(suggestions);
    }
  });
});

/**
 * Geocode
 */

function encode(address, callback) {
  var text = '';
  if (address.address) {
    text = address.address + ', ' + address.city + ', ' + address.state + ' ' + address.zip;
  } else {
    text = address;
  }

  auth(callback, function(token) {
    superagent
      .get(GEOCODE + '/find')
      .query({
        category: CATEGORY,
        f: 'json',
        text: text,
        token: token
      })
      .end(function(err, res) {
        if (err) {
          callback(err, res);
        } else {
          var body = parseResponse(res, callback);
          if (!body || !body.locations || body.locations.length === 0) {
            callback(new Error('Location not found.'));
          } else {
            callback(null, body.locations);
          }
        }
      });
  });
}

/**
 * Reverse geocode
 */

function reverse(ll, callback) {
  var location = ll;
  if (ll.lng) {
    location = ll.lng + ',' + ll.lat;
  } else if (ll.x) {
    location = ll.x + ',' + ll.y;
  } else if (ll[0]) {
    location = ll[0] + ',' + ll[1];
  }

  auth(callback, function(token) {
    superagent
      .get(GEOCODE + '/reverseGeocode')
      .query({
        f: 'json',
        location: location,
        token: token
      })
      .end(function(err, res) {
        if (err) {
          callback(err, res);
        } else {
          var body = parseResponse(res, callback);
          if (!body || !body.address) {
            callback(new Error('Location not found.'));
          } else {
            var addr = body.address;
            callback(null, {
              address: addr.Address,
              neighborhood: addr.Neighborhood,
              city: addr.City,
              county: addr.Subregion,
              state: addr.Region,
              zip: parseInt(addr.Postal, 10),
              country: addr.CountryCode
            });
          }
        }
      });
  });
}

/**
 * Auto suggest
 */

function suggest(text, callback) {
  auth(callback, function(token) {
    superagent
      .get(GEOCODE + '/suggest')
      .query({
        category: CATEGORY,
        distance: DISTANCE,
        f: 'json',
        location: CENTER,
        text: text,
        token: token
      })
      .end(function(err, res) {
        if (err) {
          callback(err, res);
        } else {
          var body = parseResponse(res, callback);
          // parseResponse has already invoked the callback on a parse error.
          if (!body) return;
          callback(null, body.suggestions);
        }
      });
  });
}

/**
 * Auth
 */

function auth(callback, next) {
  generateAuthToken(function(err, token) {
    if (err) {
      callback(err);
    } else {
      next(token);
    }
  });
}

/**
 * Parse
 */

function parseResponse(res, callback) {
  try {
    return JSON.parse(res.text);
  } catch (e) {
    callback(e);
  }
}

/**
 * Generate an auth token
 */

function generateAuthToken(callback) {
  // If we're within 7 days of auth token expiration, generate a new one.
  // expires_in is in minutes while expires is a millisecond timestamp,
  // so half the lifetime must be converted to milliseconds here.
  if (expires - (expires_in / 2) * 60 * 1000 < Date.now().valueOf()) {
    superagent
      .get(OAUTH)
      .query({
        client_id: config.arcgis_id,
        client_secret: config.arcgis_secret,
        expiration: expires_in,
        grant_type: 'client_credentials'
      })
      .end(function(err, res) {
        if (err || res.error || !res.ok) {
          callback(err || res.error || res.text);
        } else {
          authToken = res.body.access_token;

          // Set the expires time
          expires = new Date();
          expires.setSeconds(expires.getSeconds() + res.body.expires_in);
          expires = expires.valueOf();

          callback(null, authToken);
        }
      });
  } else {
    callback(null, authToken);
  }
}
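The generateAuthToken logic above caches a token and refreshes it once less than half of its lifetime remains; the original comparison mixed minutes (expires_in) with a millisecond timestamp (expires), which the corrected line converts explicitly. The same idea in a compact Python sketch; TokenCache and fetch are illustrative names, not part of the ArcGIS API.

import time

TOKEN_TTL_MINUTES = 20160  # 14 days, matching expires_in above

class TokenCache:
    def __init__(self, fetch):
        self.fetch = fetch        # callable returning (token, ttl_seconds)
        self.token = None
        self.expires_ms = 0

    def get(self):
        # Refresh once we are within half the TTL of expiry, converting
        # minutes to milliseconds -- the unit mix-up fixed in the row above.
        half_ttl_ms = TOKEN_TTL_MINUTES / 2 * 60 * 1000
        now_ms = time.time() * 1000
        if self.expires_ms - half_ttl_ms < now_ms:
            self.token, ttl_seconds = self.fetch()
            self.expires_ms = now_ms + ttl_seconds * 1000
        return self.token

Refreshing at the halfway point rather than at expiry means a request never has to block on a token that just lapsed.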
b'Evaluate -11 - (-21 + 16) - (-1 + 12).\n'
b'(-520 - -550) + (-2 - 41)\n'
<?xml version="1.0" encoding="iso-8859-1"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">

<html>
<head>
  <title>readlines (Buffering)</title>
  <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
  <link rel="stylesheet" href="../.././rdoc-style.css" type="text/css" media="screen" />
</head>
<body class="standalone-code">
  <pre><span class="ruby-comment cmt"># File lib/openssl/buffering.rb, line 124</span>
<span class="ruby-keyword kw">def</span> <span class="ruby-identifier">readlines</span>(<span class="ruby-identifier">eol</span>=<span class="ruby-identifier">$/</span>)
  <span class="ruby-identifier">ary</span> = []
  <span class="ruby-keyword kw">while</span> <span class="ruby-identifier">line</span> = <span class="ruby-keyword kw">self</span>.<span class="ruby-identifier">gets</span>(<span class="ruby-identifier">eol</span>)
    <span class="ruby-identifier">ary</span> <span class="ruby-operator">&lt;&lt;</span> <span class="ruby-identifier">line</span>
  <span class="ruby-keyword kw">end</span>
  <span class="ruby-identifier">ary</span>
<span class="ruby-keyword kw">end</span></pre>
</body>
</html>
//
//  IGViewController.h
//  KaifKit
//
//  Created by Francis Chong on 04/15/2015.
//  Copyright (c) 2014 Francis Chong. All rights reserved.
//

#import <UIKit/UIKit.h>

@interface IGViewController : UIViewController

@end
b'What is 972 - 943 - (42 - -2)?\n'
<?php

/**
 * Zend Framework
 *
 * LICENSE
 *
 * This source file is subject to the new BSD license that is bundled
 * with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://framework.zend.com/license/new-bsd
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to [email protected] so we can send you a copy immediately.
 *
 * @category   Zend
 * @package    Zend_Gdata
 * @subpackage YouTube
 * @copyright  Copyright (c) 2005-2012 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd  New BSD License
 * @version    $Id: Username.php 24594 2012-01-05 21:27:01Z matthew $
 */

/**
 * @see Zend_Gdata_Extension
 */
// require_once 'Zend/Gdata/Extension.php';

/**
 * Represents the yt:username element
 *
 * @category   Zend
 * @package    Zend_Gdata
 * @subpackage YouTube
 * @copyright  Copyright (c) 2005-2012 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd  New BSD License
 */
class Zend_Gdata_YouTube_Extension_Username extends Zend_Gdata_Extension
{
    protected $_rootElement = 'username';
    protected $_rootNamespace = 'yt';

    public function __construct($text = null)
    {
        $this->registerAllNamespaces(Zend_Gdata_YouTube::$namespaces);
        parent::__construct();
        $this->_text = $text;
    }
}
b'-17 - (-51 + -4 - -22)\n'
b'Evaluate (8 - 2) + -4 + (-6 - -23 - 14).\n'
b'What is 3 + (29 - 23) + (4 - 2) + 0?\n'
b'What is the value of 44 + -7 + -6 + -16?\n'
b'What is the value of (-7 + -14 + 7 - -41) + -10?\n'
b'(1 - 1) + -1 + (-26 - -72) + -62\n'
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
from scipy.spatial import distance
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn import decomposition  # PCA
from sklearn.metrics import confusion_matrix
import json

import ml.Features as ft
from utils import Utils


class Identifier(object):
    def __init__(self):
        columns = ['mean_height', 'min_height', 'max_height',
                   'mean_width', 'min_width', 'max_width',
                   'time', 'girth', 'id']
        self.data = DataFrame(columns=columns)
        self.event = []

    @staticmethod
    def subscribe(ch, method, properties, body):
        """
        prints the body message. It's the default callback method
        :param ch: keep null
        :param method: keep null
        :param properties: keep null
        :param body: the message
        :return:
        """
        # first we get the JSON from body
        # we check if it's part of the walking event
        # if walking event is completed, we
        print(body)


if __name__ == '__main__':
    # we setup needed params
    MAX_HEIGHT = 203
    MAX_WIDTH = 142
    SPEED = 3
    SAMPLING_RATE = 8
    mq_host = '172.26.56.122'
    queue_name = 'door_data'

    # setting up MQTT subscriber; the callback must be referenced through
    # the class, since subscribe is a static method of Identifier
    Utils.sub(queue_name=queue_name, callback=Identifier.subscribe, host=mq_host)
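The row above imports DBSCAN and StandardScaler but never reaches the clustering step. A minimal sketch of how those imports are typically combined, with a made-up feature matrix standing in for the mean_height/mean_width/girth columns of the DataFrame; eps and min_samples are placeholder values that would need tuning on real door-sensor data.

import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler

# Hypothetical rows of (mean_height, mean_width, girth) features.
X = np.array([[180.0, 45.0, 60.0],
              [178.0, 44.0, 58.0],
              [120.0, 30.0, 40.0]])

# DBSCAN is distance-based, so standardize the features first.
labels = DBSCAN(eps=0.8, min_samples=2).fit_predict(
    StandardScaler().fit_transform(X))
print(labels)  # e.g. [0, 0, -1]: two clustered rows and one noise point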
b'-9 + -3 + (22 + 17 - 29)\n'
<?php
/*
Template Name: Full Page Width Template
*/

get_header();

while ( have_posts() ) {
	the_post();
	get_template_part( 'content', 'page-full' );
} // end of the loop

get_footer();
b'Evaluate -2 - (-11 + (3 - -1 - -2) + 3).\n'
b'17 - ((-3 - -17) + 14) - -1\n'
// Copyright (c) 2021 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/decred/dcrd/blockchain/v4/internal/progresslog"
	"github.com/decred/dcrd/database/v3"
	"github.com/decred/dcrd/dcrutil/v4"
)

// IndexNtfnType represents an index notification type.
type IndexNtfnType int

const (
	// ConnectNtfn indicates the index notification signals a block
	// connected to the main chain.
	ConnectNtfn IndexNtfnType = iota

	// DisconnectNtfn indicates the index notification signals a block
	// disconnected from the main chain.
	DisconnectNtfn
)

var (
	// bufferSize represents the index notification buffer size.
	bufferSize = 128

	// noPrereqs indicates no index prerequisites.
	noPrereqs = "none"
)

// IndexNtfn represents an index notification detailing a block connection
// or disconnection.
type IndexNtfn struct {
	NtfnType          IndexNtfnType
	Block             *dcrutil.Block
	Parent            *dcrutil.Block
	PrevScripts       PrevScripter
	IsTreasuryEnabled bool
	Done              chan bool
}

// IndexSubscription represents a subscription for index updates.
type IndexSubscription struct {
	id         string
	idx        Indexer
	subscriber *IndexSubscriber
	mtx        sync.Mutex

	// prerequisite defines the notification processing hierarchy for this
	// subscription. It is expected that the subscriber associated with the
	// prerequisite provided processes notifications before they are
	// delivered by this subscription to its subscriber. An empty string
	// indicates the subscription has no prerequisite.
	prerequisite string

	// dependent defines the index subscription that requires the subscriber
	// associated with this subscription to have processed incoming
	// notifications before it does. A nil dependency indicates the subscription
	// has no dependencies.
	dependent *IndexSubscription
}

// newIndexSubscription initializes a new index subscription.
func newIndexSubscription(subber *IndexSubscriber, indexer Indexer, prereq string) *IndexSubscription {
	return &IndexSubscription{
		id:           indexer.Name(),
		idx:          indexer,
		prerequisite: prereq,
		subscriber:   subber,
	}
}

// stop prevents any future index updates from being delivered and
// unsubscribes the associated subscription.
func (s *IndexSubscription) stop() error {
	// If the subscription has a prerequisite, find it and remove the
	// subscription as a dependency.
	if s.prerequisite != noPrereqs {
		s.mtx.Lock()
		prereq, ok := s.subscriber.subscriptions[s.prerequisite]
		s.mtx.Unlock()
		if !ok {
			return fmt.Errorf("no subscription found with id %s", s.prerequisite)
		}

		prereq.mtx.Lock()
		prereq.dependent = nil
		prereq.mtx.Unlock()

		return nil
	}

	// If the subscription has a dependent, stop it as well.
	if s.dependent != nil {
		err := s.dependent.stop()
		if err != nil {
			return err
		}
	}

	// If the subscription is independent, remove it from the
	// index subscriber's subscriptions.
	s.mtx.Lock()
	delete(s.subscriber.subscriptions, s.id)
	s.mtx.Unlock()

	return nil
}

// IndexSubscriber subscribes clients for index updates.
type IndexSubscriber struct {
	subscribers uint32 // update atomically.

	c             chan IndexNtfn
	subscriptions map[string]*IndexSubscription
	mtx           sync.Mutex
	ctx           context.Context
	cancel        context.CancelFunc
	quit          chan struct{}
}

// NewIndexSubscriber creates a new index subscriber. It also starts the
// handler for incoming index update subscriptions.
func NewIndexSubscriber(sCtx context.Context) *IndexSubscriber {
	ctx, cancel := context.WithCancel(sCtx)
	s := &IndexSubscriber{
		c:             make(chan IndexNtfn, bufferSize),
		subscriptions: make(map[string]*IndexSubscription),
		ctx:           ctx,
		cancel:        cancel,
		quit:          make(chan struct{}),
	}

	return s
}

// Subscribe subscribes an index for updates. The returned index subscription
// has functions to retrieve a channel that produces a stream of index updates
// and to stop the stream when the caller no longer wishes to receive updates.
func (s *IndexSubscriber) Subscribe(index Indexer, prerequisite string) (*IndexSubscription, error) {
	sub := newIndexSubscription(s, index, prerequisite)

	// If the subscription has a prerequisite, find it and set the
	// subscription as a dependency.
	if prerequisite != noPrereqs {
		s.mtx.Lock()
		prereq, ok := s.subscriptions[prerequisite]
		s.mtx.Unlock()
		if !ok {
			return nil, fmt.Errorf("no subscription found with id %s", prerequisite)
		}

		prereq.mtx.Lock()
		defer prereq.mtx.Unlock()

		if prereq.dependent != nil {
			return nil, fmt.Errorf("%s already has a dependent set: %s",
				prereq.id, prereq.dependent.id)
		}

		prereq.dependent = sub
		atomic.AddUint32(&s.subscribers, 1)

		return sub, nil
	}

	// If the subscription does not have a prerequisite, add it to the index
	// subscriber's subscriptions.
	s.mtx.Lock()
	s.subscriptions[sub.id] = sub
	s.mtx.Unlock()

	atomic.AddUint32(&s.subscribers, 1)

	return sub, nil
}

// Notify relays an index notification to subscribed indexes for processing.
func (s *IndexSubscriber) Notify(ntfn *IndexNtfn) {
	subscribers := atomic.LoadUint32(&s.subscribers)

	// Only relay notifications when there are subscribed indexes
	// to be notified.
	if subscribers > 0 {
		select {
		case <-s.quit:
		case s.c <- *ntfn:
		}
	}
}

// findLowestIndexTipHeight determines the lowest index tip height among
// subscribed indexes and their dependencies.
func (s *IndexSubscriber) findLowestIndexTipHeight(queryer ChainQueryer) (int64, int64, error) {
	// Find the lowest tip height to catch up among subscribed indexes.
	bestHeight, _ := queryer.Best()
	lowestHeight := bestHeight
	for _, sub := range s.subscriptions {
		tipHeight, tipHash, err := sub.idx.Tip()
		if err != nil {
			return 0, bestHeight, err
		}

		// Ensure the index tip is on the main chain.
		if !queryer.MainChainHasBlock(tipHash) {
			return 0, bestHeight, fmt.Errorf("%s: index tip (%s) is not on the "+
				"main chain", sub.idx.Name(), tipHash)
		}

		if tipHeight < lowestHeight {
			lowestHeight = tipHeight
		}

		// Update the lowest tip height if a dependent has a lower tip height.
		// Read the tip from the node currently being visited; reading from
		// sub.dependent here would only ever inspect the first dependent.
		dependent := sub.dependent
		for dependent != nil {
			tipHeight, _, err := dependent.idx.Tip()
			if err != nil {
				return 0, bestHeight, err
			}

			if tipHeight < lowestHeight {
				lowestHeight = tipHeight
			}

			dependent = dependent.dependent
		}
	}

	return lowestHeight, bestHeight, nil
}

// CatchUp syncs all subscribed indexes to the main chain by connecting
// blocks from after the lowest index tip to the current main chain tip.
//
// This should be called after all indexes have subscribed for updates.
func (s *IndexSubscriber) CatchUp(ctx context.Context, db database.DB, queryer ChainQueryer) error {
	lowestHeight, bestHeight, err := s.findLowestIndexTipHeight(queryer)
	if err != nil {
		return err
	}

	// Nothing to do if all indexes are synced.
	if bestHeight == lowestHeight {
		return nil
	}

	// Create a progress logger for the indexing process below.
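findLowestIndexTipHeight above has to consider not just each subscription's own tip but the whole chain of dependents hanging off it, since any of them may lag further behind; the corrected loop now advances through that chain instead of re-reading the first dependent. The traversal reduces to this Python sketch (names are illustrative):

from types import SimpleNamespace as NS

def lowest_tip_height(subscriptions, best_height):
    # Walk each subscription and its chain of dependents, keeping the
    # minimum tip height seen anywhere in the hierarchy.
    lowest = best_height
    for sub in subscriptions:
        node = sub
        while node is not None:
            lowest = min(lowest, node.tip_height)
            node = node.dependent
    return lowest

a = NS(tip_height=90, dependent=NS(tip_height=75, dependent=None))
b = NS(tip_height=88, dependent=None)
print(lowest_tip_height([a, b], best_height=100))  # 75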
	progressLogger := progresslog.NewBlockProgressLogger("Indexed", log)

	// The indexes are currently behind the best chain tip and need to be
	// caught up, so log the details and loop through each block that needs
	// to be indexed.
	log.Infof("Catching up from height %d to %d", lowestHeight, bestHeight)

	var cachedParent *dcrutil.Block
	for height := lowestHeight + 1; height <= bestHeight; height++ {
		if interruptRequested(ctx) {
			return indexerError(ErrInterruptRequested, interruptMsg)
		}

		hash, err := queryer.BlockHashByHeight(height)
		if err != nil {
			return err
		}

		// Ensure the next tip hash is on the main chain.
		if !queryer.MainChainHasBlock(hash) {
			msg := fmt.Sprintf("the next block being synced to (%s) "+
				"at height %d is not on the main chain", hash, height)
			return indexerError(ErrBlockNotOnMainChain, msg)
		}

		var parent *dcrutil.Block
		if cachedParent == nil && height > 0 {
			parentHash, err := queryer.BlockHashByHeight(height - 1)
			if err != nil {
				return err
			}
			parent, err = queryer.BlockByHash(parentHash)
			if err != nil {
				return err
			}
		} else {
			parent = cachedParent
		}

		child, err := queryer.BlockByHash(hash)
		if err != nil {
			return err
		}

		// Construct and send the index notification.
		var prevScripts PrevScripter
		err = db.View(func(dbTx database.Tx) error {
			if interruptRequested(ctx) {
				return indexerError(ErrInterruptRequested, interruptMsg)
			}

			prevScripts, err = queryer.PrevScripts(dbTx, child)
			if err != nil {
				return err
			}

			return nil
		})
		if err != nil {
			return err
		}

		isTreasuryEnabled, err := queryer.IsTreasuryAgendaActive(parent.Hash())
		if err != nil {
			return err
		}

		ntfn := &IndexNtfn{
			NtfnType:          ConnectNtfn,
			Block:             child,
			Parent:            parent,
			PrevScripts:       prevScripts,
			IsTreasuryEnabled: isTreasuryEnabled,
		}

		// Relay the index update to subscribed indexes.
		for _, sub := range s.subscriptions {
			err := updateIndex(ctx, sub.idx, ntfn)
			if err != nil {
				s.cancel()
				return err
			}
		}

		cachedParent = child

		progressLogger.LogBlockHeight(child.MsgBlock(), parent.MsgBlock())
	}

	log.Infof("Caught up to height %d", bestHeight)

	return nil
}

// Run relays index notifications to subscribed indexes.
//
// This should be run as a goroutine.
func (s *IndexSubscriber) Run(ctx context.Context) {
	for {
		select {
		case ntfn := <-s.c:
			// Relay the index update to subscribed indexes.
			for _, sub := range s.subscriptions {
				err := updateIndex(ctx, sub.idx, &ntfn)
				if err != nil {
					log.Error(err)
					s.cancel()
					break
				}
			}

			if ntfn.Done != nil {
				close(ntfn.Done)
			}

		case <-ctx.Done():
			log.Infof("Index subscriber shutting down")

			close(s.quit)

			// Stop all updates to subscribed indexes and terminate their
			// processes.
			for _, sub := range s.subscriptions {
				err := sub.stop()
				if err != nil {
					log.Errorf("unable to stop index subscription: %v", err)
				}
			}

			s.cancel()

			return
		}
	}
}
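The CatchUp loop above avoids fetching each parent block twice: after the first iteration, the block just connected is cached and reused as the next iteration's parent. The pattern in isolation, as a Python sketch (blocks, catch_up, and index_block are illustrative stand-ins):

def catch_up(blocks, lowest_height, best_height, index_block):
    # Mirrors the loop in CatchUp: each connected block becomes the cached
    # parent of the next, so only the first iteration pays for an extra
    # parent lookup.
    cached_parent = None
    for height in range(lowest_height + 1, best_height + 1):
        parent = cached_parent if cached_parent is not None else blocks[height - 1]
        child = blocks[height]
        index_block(child, parent)
        cached_parent = child

blocks = {h: "block-%d" % h for h in range(6)}
catch_up(blocks, lowest_height=2, best_height=5,
         index_block=lambda child, parent: print(parent, "->", child))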
b'What is -12 + -2 + 194 + -175?\n'
b'What is the value of 1 - -2 - (-1 + -4 + -13 + 20)?\n'
b'Calculate -4 + (-24 + 3 - -43).\n'
-- The original row declared `comment` with an empty column list, which is
-- not valid MySQL; a minimal auto-increment key is assumed here so the
-- statement runs (the column name `id` is illustrative).
CREATE TABLE IF NOT EXISTS `comment` (
  `id` INT UNSIGNED NOT NULL AUTO_INCREMENT,
  PRIMARY KEY (`id`)
) ENGINE=MyISAM AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
b'Evaluate (10 + -6 + -8 - 5) + -5.\n'
package nl.ulso.sprox.json.spotify;

import nl.ulso.sprox.Node;

import java.time.LocalDate;
import java.util.List;

/**
 * Sprox processor for Spotify API album data. This is a very simple processor that ignores most data.
 * <p>
 * This implementation creates an Artist object for each and every artist in the response. But only the first one on
 * album level is kept in the end.
 * </p>
 */
public class AlbumFactory {

    @Node("album")
    public Album createAlbum(@Node("name") String name, @Node("release_date") LocalDate releaseDate,
                             Artist artist, List<Track> tracks) {
        return new Album(name, releaseDate, artist, tracks);
    }

    @Node("artists")
    public Artist createArtist(@Node("name") String name) {
        return new Artist(name);
    }

    @Node("items")
    public Track createTrack(@Node("track_number") Integer trackNumber, @Node("name") String name) {
        return new Track(trackNumber, name);
    }
}
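For reference, the JSON shape the AlbumFactory implies can be decoded in a few lines. The shape is inferred purely from the @Node annotations above, not from Spotify's documentation, and the sample document is a made-up assumption.

import json

raw = ('{"album": {"name": "Sample", "release_date": "2013-05-17",'
       ' "artists": [{"name": "Someone"}],'
       ' "tracks": {"items": [{"track_number": 1, "name": "Opener"}]}}}')

doc = json.loads(raw)["album"]
album = {
    "name": doc["name"],
    "release_date": doc["release_date"],
    # Only the first artist survives, matching the class comment above.
    "artist": doc["artists"][0]["name"],
    "tracks": [(t["track_number"], t["name"]) for t in doc["tracks"]["items"]],
}
print(album)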
b'Evaluate -16 - ((-3 - 0) + -12 + 11).\n'
b'5 + -1 + 2 - (15 - -4 - -11)\n'
import { Injectable } from '@angular/core';
import { Observable } from 'rxjs/Observable';
import 'rxjs/add/operator/map'; // patches Observable.prototype.map used below

import { Gang } from '../models/gang';
import { Session } from '../models/session';
import { CREW_2_ROUTE } from '../../app/app.routes.model';
import { HttpClient } from '@angular/common/http';

@Injectable()
export class CrewService {

  private crewOne: Observable<Gang>;
  private crewTwo: Observable<Gang>;

  constructor(private http: HttpClient) {
  }

  public getCrewDataForPath(path: string): Observable<Gang> {
    if (path === CREW_2_ROUTE) {
      return this.getCrewTwoData();
    } else {
      return this.getCrewOneData();
    }
  }

  public getCrewOneData(): Observable<Gang> {
    if (!this.crewOne) {
      this.crewOne = this.getCrew('assassins');
    }
    return this.crewOne;
  }

  public getCrewTwoData(): Observable<Gang> {
    if (!this.crewTwo) {
      this.crewTwo = this.getCrew('assassins2');
    }
    return this.crewTwo;
  }

  private getCrew(filename: string): Observable<Gang> {
    return this.http.get('/assets/data/' + filename + '.json').map((gang: Gang) => {
      gang.sessions = this.sortSessions(gang.sessions);
      return gang;
    });
  }

  private sortSessions(sessions: Session[]): Session[] {
    return sessions.map((session: Session) => {
      session.date = Date.parse(<any> session.date);
      return session;
    }).sort((a, b) => a.date - b.date);
  }
}
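sortSessions above parses each session's date string into a numeric timestamp before sorting, so the comparator can stay a simple subtraction. The same transform in Python, with the dict field names assumed to match the Session model:

from datetime import datetime

def sort_sessions(sessions):
    # Parse ISO date strings to epoch timestamps, then sort ascending
    # so the oldest session comes first.
    for s in sessions:
        s["date"] = datetime.fromisoformat(s["date"]).timestamp()
    return sorted(sessions, key=lambda s: s["date"])

print(sort_sessions([{"date": "2021-03-01"}, {"date": "2020-12-24"}]))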
b'What is 97 - 64 - (11 - 3)?\n'