package me.pxq.eyepetizer.notification
import com.alibaba.android.arouter.facade.annotation.Route
import me.pxq.framework.router.RouterHub
import me.pxq.framework.ui.BaseTabsFragment
import me.pxq.eyepetizer.notification.adapters.ViewPagerAdapter
/**
* Description: Main page - Notification Fragment
* Author : pxq
* Date : 2020/8/23 4:25 PM
*/
@Route(path = RouterHub.MAIN_NOTIFICATION)
class NotificationFragment : BaseTabsFragment() {
private val tabs = listOf("ๆจ้", "ไบๅจ", "็งไฟก") // "Push", "Interactions", "Private messages"
override fun createAdapter() = ViewPagerAdapter(this, tabs.size)
override fun createTabs() = tabs
// Default to the "Push" tab
override fun currentIndex() = 0
} |
package kaptainwutax.itraders.entity;
import com.mojang.authlib.GameProfile;
import kaptainwutax.itraders.net.FakeServerHandler;
import net.minecraft.world.WorldServer;
import net.minecraftforge.common.util.FakePlayer;
public class FakeUser extends FakePlayer {
public FakeUser(WorldServer world, GameProfile name) {
super(world, name);
this.connection = new FakeServerHandler(this);
}
@Override
public void resetCooldown() {
// Pretend the last swing happened long ago so the fake player's
// attack strength is always fully charged.
this.ticksSinceLastSwing = 20000;
}
}
|
DROP TABLE IF EXISTS `record`;
CREATE TABLE `record` (
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`flag` bigint(20) NOT NULL,
`application_name` varchar(128) DEFAULT '',
`name` varchar(128) NOT NULL,
`key` varchar(128) NOT NULL,
`class_name` varchar(256) DEFAULT '',
`response` blob,
`created_time` datetime(3) NOT NULL,
`expired_time` datetime(3) NOT NULL,
PRIMARY KEY (`id`),
KEY `domain_value_index` (`application_name`,`name`,`key`,`expired_time`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
DROP TABLE IF EXISTS `user`;
CREATE TABLE `user` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`name` varchar(128) COLLATE utf8mb4_bin DEFAULT NULL,
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; |
// Template based on the Mocha testing Framework from http://visionmedia.github.io/mocha/
// Syntax based on the Should.js BDD style testing from https://github.com/visionmedia/should.js
var should = require('should');
// Synchronous Code
describe('Array', function () {
describe('#indexOf()', function () {
it('should return -1 when the value is not present', function () {
[1, 2, 3].indexOf(5).should.equal(-1);
[1, 2, 3].indexOf(0).should.equal(-1);
})
})
})
// Asynchronous Code
describe('User', function () {
describe('#save()', function () {
it('should save without error', function (done) {
var user = new User('Luna');
user.save(done);
})
})
})
// Run a specific Test Case
//describe('Array', function () {
// describe('#indexOf()', function () {
// it.only('should return -1 unless present', function () {
// })
// it('should return the index when present', function () {
// })
// })
//})
// Skip a Specific Test case
//describe('Array', function () {
// describe('#indexOf()', function () {
// it.skip('should return -1 unless present', function () {
// })
// it('should return the index when present', function () {
// })
// })
//})
|
module Main where
import Lib (run)
import System.Directory (doesFileExist, makeAbsolute)
import System.Environment (getArgs)
import qualified Data.Text as T (strip)
import qualified Data.Text.IO as T (readFile)
main :: IO ()
main =
do args <- getArgs
case args of
[filePath] ->
do absPath <- makeAbsolute filePath
exists <- doesFileExist absPath
if exists
then run . T.strip =<< T.readFile absPath
else putStrLn "The file you specified does not exist.\n\
\(Note: home directory (tilde) expansion is not supported yet.)"
_ -> putStrLn "Invalid arguments. Supply one file path, please."
|
package org.carlspring.strongbox.providers.search;
import org.carlspring.strongbox.services.ArtifactEntryService;
import org.carlspring.strongbox.storage.search.SearchRequest;
import org.carlspring.strongbox.storage.search.SearchResult;
import org.carlspring.strongbox.storage.search.SearchResults;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Component;
/**
* @author carlspring
*/
@Component("orientDbSearchProvider")
public class OrientDbSearchProvider
extends AbstractSearchProvider
{
private static final Logger logger = LoggerFactory.getLogger(OrientDbSearchProvider.class);
private static final String QUERY_PATTERN_DB = "([^=]+)=([^;]+);"; // semicolon-delimited key=value pairs, e.g. "groupId=org.foo;artifactId=bar;"
public static final String ALIAS = "OrientDB";
@Inject
private SearchProviderRegistry searchProviderRegistry;
@Inject
private ArtifactEntryService artifactEntryService;
@PostConstruct
@Override
public void register()
{
searchProviderRegistry.addProvider(ALIAS, this);
logger.info("Registered search provider '{}' with alias '{}'.", getClass().getCanonicalName(), ALIAS);
}
@Override
public String getAlias()
{
return ALIAS;
}
@Override
public SearchResults search(SearchRequest searchRequest)
throws SearchException
{
SearchResults searchResults = new SearchResults();
String query = searchRequest.getQuery();
Pattern pattern = Pattern.compile(QUERY_PATTERN_DB);
Matcher matcher = pattern.matcher(query);
if (matcher.find())
{
Map<String, String> coordinates = new HashMap<>();
do
{
coordinates.put(matcher.group(1), matcher.group(2));
}
while (matcher.find());
List<SearchResult> results = new LinkedList<>();
results.addAll(artifactEntryService.findArtifactList(searchRequest.getStorageId(),
searchRequest.getRepositoryId(),
coordinates, false)
.stream()
.map(this::createSearchResult)
.collect(Collectors.toList()));
searchResults.getResults().addAll(results);
return searchResults;
}
logger.debug("Results: {}", searchResults.getResults().size());
return searchResults;
}
}
|
package com.papsign.ktor.openapigen.annotations.type
import com.papsign.ktor.openapigen.validation.Validator
import com.papsign.ktor.openapigen.validation.ValidatorBuilder
import kotlin.reflect.KType
import kotlin.reflect.full.withNullability
open class SingleTypeValidator<A: Annotation>(allowedType: KType, private val validator: (A)-> Validator): ValidatorBuilder<A> {
private val allowedType: KType = allowedType.withNullability(false)
override fun build(type: KType, annotation: A): Validator {
if (type.withNullability(false) == allowedType) return validator(annotation)
error("${annotation::class} annotation cannot be applied to type: $type, only $allowedType is allowed")
}
}
|
package info.hannes.changelog
import com.google.gson.annotations.Expose
import com.google.gson.annotations.SerializedName
class Gitlog {
@SerializedName("version")
@Expose
var version: String? = null
@SerializedName("code")
@Expose
var code: String? = null
@SerializedName("date")
@Expose
var date: String? = null
@SerializedName("message")
@Expose
var message: String? = null
} |
/*
* Example made with love by Natxopedreira 2021
* https://github.com/natxopedreira
* Updated by members of the ZKM | Hertz-Lab 2021
*/
#include "ofApp.h"
//--------------------------------------------------------------
void ofApp::setup() {
ofSetFrameRate(60);
ofSetVerticalSync(true);
//TODO
// ofSetWindowTitle("example_movenet | singlepose lightning");
ofSetWindowTitle("example_movenet");
//https://tfhub.dev/google/movenet/singlepose/lightning/4
if(!movenet.setup("model")) {
std::exit(EXIT_FAILURE);
}
#ifdef USE_LIVE_VIDEO
// setup video grabber
video.setDesiredFrameRate(30);
video.setup(camWidth, camHeight);
#else
video.load("production ID 3873059_2.mp4");
camWidth = video.getWidth();
camHeight = video.getHeight();
video.play();
#endif
imgOut.allocate(nnWidth, nnHeight, OF_IMAGE_COLOR);
}
//--------------------------------------------------------------
void ofApp::update() {
video.update();
if(video.isFrameNew()) {
ofPixels pixels(video.getPixels());
pixels.crop((camWidth-camHeight)/2,0,camHeight,camHeight);
pixels.resize(nnWidth, nnHeight);
if(mirror) {
pixels.mirror(false, true);
}
imgOut.setFromPixels(pixels);
imgOut.update();
// feed input frame as pixels
movenet.setInput(pixels);
}
// run model on current input frame
movenet.update();
}
//--------------------------------------------------------------
void ofApp::draw() {
imgOut.draw(0, 0);
video.draw(ofGetWidth()-320,0,320,240);
movenet.draw();
ofDrawBitmapStringHighlight(ofToString((int)ofGetFrameRate()) + " fps", 4, 12);
}
//--------------------------------------------------------------
void ofApp::exit() {
movenet.stopThread();
}
//--------------------------------------------------------------
void ofApp::keyPressed(int key) {
switch(key) {
case 'm':
// toggle camera mirroring
#ifdef USE_LIVE_VIDEO
mirror = !mirror;
#endif
break;
case 'r':
// restart video
#ifndef USE_LIVE_VIDEO
video.stop();
video.play();
#endif
break;
case 't':
// toggle threading
if(movenet.isThreadRunning()) {
movenet.stopThread();
ofLogNotice() << "stopping thread";
}
else {
movenet.startThread();
ofLogNotice() << "starting thread";
}
break;
}
}
|
/*
* Copyright (c) 2015 Typelevel
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package cats
package data
abstract private[data] class AbstractNonEmptyInstances[F[_], NonEmptyF[_]](implicit
MF: Monad[F],
CF: CoflatMap[F],
TF: Traverse[F],
SF: SemigroupK[F]
) extends Bimonad[NonEmptyF]
with NonEmptyTraverse[NonEmptyF]
with SemigroupK[NonEmptyF] {
val monadInstance = MF.asInstanceOf[Monad[NonEmptyF]]
val coflatMapInstance = CF.asInstanceOf[CoflatMap[NonEmptyF]]
val traverseInstance = Traverse[F].asInstanceOf[Traverse[NonEmptyF]]
val semiGroupKInstance = SemigroupK[F].asInstanceOf[SemigroupK[NonEmptyF]]
def combineK[A](a: NonEmptyF[A], b: NonEmptyF[A]): NonEmptyF[A] =
semiGroupKInstance.combineK(a, b)
def pure[A](x: A): NonEmptyF[A] = monadInstance.pure(x)
override def map[A, B](fa: NonEmptyF[A])(f: A => B): NonEmptyF[B] = monadInstance.map(fa)(f)
def flatMap[A, B](fa: NonEmptyF[A])(f: A => NonEmptyF[B]): NonEmptyF[B] =
monadInstance.flatMap(fa)(f)
override def map2[A, B, Z](fa: NonEmptyF[A], fb: NonEmptyF[B])(f: (A, B) => Z): NonEmptyF[Z] =
monadInstance.map2(fa, fb)(f)
override def map2Eval[A, B, Z](fa: NonEmptyF[A], fb: Eval[NonEmptyF[B]])(f: (A, B) => Z): Eval[NonEmptyF[Z]] =
monadInstance.map2Eval(fa, fb)(f)
def coflatMap[A, B](fa: NonEmptyF[A])(f: NonEmptyF[A] => B): NonEmptyF[B] =
coflatMapInstance.coflatMap(fa)(f)
def tailRecM[A, B](a: A)(f: A => NonEmptyF[Either[A, B]]): NonEmptyF[B] =
monadInstance.tailRecM(a)(f)
def foldLeft[A, B](fa: NonEmptyF[A], b: B)(f: (B, A) => B): B =
traverseInstance.foldLeft(fa, b)(f)
def foldRight[A, B](fa: NonEmptyF[A], lb: Eval[B])(f: (A, Eval[B]) => Eval[B]): Eval[B] =
traverseInstance.foldRight(fa, lb)(f)
override def foldMap[A, B](fa: NonEmptyF[A])(f: A => B)(implicit B: Monoid[B]): B =
traverseInstance.foldMap(fa)(f)
override def traverse[G[_], A, B](fa: NonEmptyF[A])(f: A => G[B])(implicit G: Applicative[G]): G[NonEmptyF[B]] =
traverseInstance.traverse(fa)(f)
override def mapWithIndex[A, B](fa: NonEmptyF[A])(f: (A, Int) => B): NonEmptyF[B] =
traverseInstance.mapWithIndex(fa)(f)
override def zipWithIndex[A](fa: NonEmptyF[A]): NonEmptyF[(A, Int)] = traverseInstance.zipWithIndex(fa)
override def exists[A](fa: NonEmptyF[A])(p: A => Boolean): Boolean = traverseInstance.exists(fa)(p)
override def forall[A](fa: NonEmptyF[A])(p: A => Boolean): Boolean = traverseInstance.forall(fa)(p)
override def get[A](fa: NonEmptyF[A])(idx: Long): Option[A] = traverseInstance.get(fa)(idx)
override def isEmpty[A](fa: NonEmptyF[A]): Boolean = false
override def foldM[G[_], A, B](fa: NonEmptyF[A], z: B)(f: (B, A) => G[B])(implicit G: Monad[G]): G[B] =
traverseInstance.foldM(fa, z)(f)
override def fold[A](fa: NonEmptyF[A])(implicit A: Monoid[A]): A = traverseInstance.fold(fa)
override def toList[A](fa: NonEmptyF[A]): List[A] = traverseInstance.toList(fa)
override def reduceLeftOption[A](fa: NonEmptyF[A])(f: (A, A) => A): Option[A] =
traverseInstance.reduceLeftOption(fa)(f)
override def find[A](fa: NonEmptyF[A])(f: A => Boolean): Option[A] = traverseInstance.find(fa)(f)
override def collectFirst[A, B](fa: NonEmptyF[A])(pf: PartialFunction[A, B]): Option[B] =
traverseInstance.collectFirst(fa)(pf)
override def collectFirstSome[A, B](fa: NonEmptyF[A])(f: A => Option[B]): Option[B] =
traverseInstance.collectFirstSome(fa)(f)
}
|
package com.learn.concurrency.threadpool;
import java.util.ArrayList;
import java.util.List;
public class ThreadPoolExample {
public static void main(String[] args) {
List<Thread> threads = new ArrayList<Thread>();
// NOTE: despite the class name, this spawns 500 raw threads directly
// instead of submitting tasks to a thread pool.
for (int i = 0; i < 500; i++) {
Runnable task = new Task(100000L + i);
Thread thread = new Thread(task);
thread.setName(String.valueOf(i));
thread.start();
threads.add(thread);
}
}
}
|
package gleak
import (
"fmt"
"strings"
"github.com/onsi/gomega/format"
"github.com/onsi/gomega/types"
)
// IgnoringCreator succeeds if the goroutine was created by a function matching
// the specified name. The expected creator function name is either in the form
// of "creatorfunction-name" or "creatorfunction-name...".
//
// An ellipsis "..." after a creatorfunction-name matches any creator function
// name if creatorfunction-name is a prefix and the goroutine's creator function
// name is at least one level deeper. For instance, "foo.bar..." matches
// "foo.bar.baz", but doesn't match "foo.bar".
func IgnoringCreator(creatorfname string) types.GomegaMatcher {
if strings.HasSuffix(creatorfname, "...") {
expectedCreatorFunction := creatorfname[:len(creatorfname)-3+1] // ...one trailing dot still expected
return &ignoringCreator{
expectedCreatorFunction: expectedCreatorFunction,
matchPrefix: true,
}
}
return &ignoringCreator{
expectedCreatorFunction: creatorfname,
}
}
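// A hedged usage sketch in a Gomega leak test (Eventually comes from the
// gomega package; Goroutines and HaveLeaked from this gleak package —
// adjust names to your test setup):
//
//	Eventually(Goroutines).ShouldNot(HaveLeaked(
//		IgnoringCreator("testing.tRunner..."), // ignore goroutines spawned by the test runner
//	))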
type ignoringCreator struct {
expectedCreatorFunction string
matchPrefix bool
}
// Match succeeds if an actual goroutine's creator function in the backtrace
// matches the specified function name or function name prefix.
func (matcher *ignoringCreator) Match(actual interface{}) (success bool, err error) {
g, err := G(actual, "IgnoringCreator")
if err != nil {
return false, err
}
if matcher.matchPrefix {
return strings.HasPrefix(g.CreatorFunction, matcher.expectedCreatorFunction), nil
}
return g.CreatorFunction == matcher.expectedCreatorFunction, nil
}
// FailureMessage returns a failure message if the actual goroutine doesn't have
// the specified function name/prefix (and optional state) at the top of the
// backtrace.
func (matcher *ignoringCreator) FailureMessage(actual interface{}) (message string) {
return format.Message(actual, matcher.message())
}
// NegatedFailureMessage returns a failure message if the actual goroutine has
// the specified function name/prefix (and optional state) at the top of the
// backtrace.
func (matcher *ignoringCreator) NegatedFailureMessage(actual interface{}) (message string) {
return format.Message(actual, "not "+matcher.message())
}
func (matcher *ignoringCreator) message() string {
if matcher.matchPrefix {
return fmt.Sprintf("to be created by a function with prefix %q", matcher.expectedCreatorFunction)
}
return fmt.Sprintf("to be created by %q", matcher.expectedCreatorFunction)
}
|
namespace TimeWarp.Architecture.Enumerations;
using System.Reflection;
/// <summary>
/// A base class for creating enumeration classes.
/// https://gist.github.com/slovely/1076365
/// https://lostechies.com/jimmybogard/2008/08/12/enumeration-classes/
/// </summary>
public abstract class Enumeration : IComparable
{
//protected Enumeration() { }
protected Enumeration(int aValue, string aName, List<string>? aAlternateCodes)
{
Value = aValue;
Name = aName;
AlternateCodes = aAlternateCodes ?? new List<string>();
}
public List<string> AlternateCodes { get; }
public string Name { get; }
public int Value { get; }
/// <summary>
/// Get the EnumerationItem from an alternate code.
/// </summary>
/// <typeparam name="T"></typeparam>
/// <param name="aAlternateCode"></param>
/// <returns></returns>
public static T? FromAlternateCode<T>(string aAlternateCode) where T : Enumeration
{
T? matchingItem =
Parse<T, string>
(
aAlternateCode,
"alternate code",
aItem => aItem.AlternateCodes.Contains(aAlternateCode)
);
return matchingItem;
}
/// <summary>
/// Get the EnumerationItem from its Name
/// </summary>
/// <typeparam name="T"></typeparam>
/// <param name="aName"></param>
/// <returns></returns>
public static T? FromName<T>(string aName) where T : Enumeration
{
T? matchingItem = Parse<T, string>(aName, "name", aItem => aItem.Name == aName);
return matchingItem;
}
/// <summary>
/// Get the EnumerationItem from its name or an alternate code.
/// </summary>
/// <typeparam name="T"></typeparam>
/// <returns></returns>
public static T? FromString<T>(string aString) where T : Enumeration
{
T? matchingItem = Parse<T, string>(aString, "", aItem =>
aItem.Name == aString ||
aItem.AlternateCodes.Contains(aString)
);
return matchingItem;
}
/// <summary>
/// Get the EnumerationItem from its value.
/// </summary>
/// <typeparam name="T"></typeparam>
/// <param name="aValue"></param>
/// <returns></returns>
public static T? FromValue<T>(int aValue) where T : Enumeration
{
T? matchingItem = Parse<T, int>(aValue, "value", item => item.Value == aValue);
return matchingItem;
}
public static IEnumerable<T> GetAll<T>() where T : Enumeration
{
Type type = typeof(T);
FieldInfo[] fields = type.GetFields(BindingFlags.Public | BindingFlags.Static | BindingFlags.DeclaredOnly);
return fields.Select(info => info.GetValue(null)).OfType<T>();
}
public int CompareTo(object? aOther) => Value.CompareTo(((Enumeration?)aOther)?.Value);
public override bool Equals(object? aObject)
{
if (aObject is not Enumeration otherValue) return false;
bool typeMatches = GetType().Equals(aObject?.GetType());
bool valueMatches = Value.Equals(otherValue.Value);
return typeMatches && valueMatches;
}
public override int GetHashCode() => Value.GetHashCode();
public override string ToString() => Name;
protected static T? Parse<T, K>(K aValue, string aDescription, Func<T, bool> aPredicate) where T : Enumeration
{
T? matchingItem = GetAll<T>().FirstOrDefault(aPredicate);
if (matchingItem is null)
{
string message = $"'{aValue}' is not a valid {aDescription} in {typeof(T)}";
throw new Exception(message);
}
return matchingItem;
}
}
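// A minimal usage sketch (hypothetical CardType enumeration; not part of this
// library). Derived types expose their items as public static readonly fields
// so GetAll<T> can discover them via reflection:
//
//   public sealed class CardType : Enumeration
//   {
//     public static readonly CardType Amex = new CardType(1, nameof(Amex), new List<string> { "AMX" });
//     public static readonly CardType Visa = new CardType(2, nameof(Visa), null);
//     private CardType(int aValue, string aName, List<string>? aAlternateCodes)
//       : base(aValue, aName, aAlternateCodes) { }
//   }
//
//   CardType? card = Enumeration.FromValue<CardType>(1);               // Amex
//   CardType? byCode = Enumeration.FromAlternateCode<CardType>("AMX"); // Amex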
|
#include "modules/pipeline/dataset_path.h"
#include "modules/pipeline/primitives.h"
#include "modules/pipeline/dataset_meta.h"
#include "gtest/gtest.h"
#include "modules/web/couchdb.h"
#include "modules/io/config.h"
#include "modules/pipeline/ottoman.h"
class dataset_test : public ::testing::Test {
public:
void SetUp() override {
if (!os) {
os.reset(new ottoman_server());
}
}
private:
// Dataset tests depend on ottoman being accessible, but ottoman
// server doesn't like being taken down.
static std::unique_ptr<ottoman_server> os;
};
std::unique_ptr<ottoman_server> dataset_test::os;
TEST_F(dataset_test, basic)
{
dataset_path p("/api/users/spiral_tester/data/test_a");
ASSERT_EQ(p.friendly().compare("/test_a"), 0);
ASSERT_EQ(p.url().compare("/api/users/spiral_tester/data/test_a"), 0);
ASSERT_EQ(p.user().compare("spiral_tester"), 0);
ASSERT_EQ(p.parent().compare("/api/users/spiral_tester/data"), 0);
ASSERT_EQ(p.name().compare("test_a"), 0);
ASSERT_EQ(p.root().append(p.name()).url(), p.url());
ASSERT_FALSE(p.is_reference());
}
void check_ls(const std::string& path, size_t expected_size)
{
dataset_path dp(path);
std::vector<direntry> listing = dp.list_dir();
ASSERT_EQ(listing.size(), expected_size);
}
void recursive_rmdir(const dataset_path& dp)
{
auto exists = dp.exists();
if (exists == path::e_directory) {
std::vector<direntry> listing = dp.list_dir();
for (const auto& item : listing) {
dataset_path child(item.url);
recursive_rmdir(child);
}
dp.remove();
} else if (exists == path::e_file) {
dp.remove();
}
}
TEST_F(dataset_test, need_couchdb)
{
gen_cache(nullptr);
dataset_path root("/api/users/spiral_tester/data");
recursive_rmdir(root);
dataset_path p("/api/users/spiral_tester/data/test_a/my_dir/mo_dir");
ASSERT_NO_THROW(p.remove());
ASSERT_EQ(p.exists(), path::e_no_exist);
check_ls("/api/users/spiral_tester/data", 0);
check_ls("/api/users/spiral_tester/data/test_a", 0);
check_ls("/api/users/spiral_tester/data/test_a/my_dir", 0);
check_ls("/api/users/spiral_tester/data/test_a/my_dir/mo_dir", 0);
// triggers the recursive mkdirs for test_a , my_dir and mo_dir
ASSERT_NO_THROW(p.mkdir());
check_ls("/api/users/spiral_tester/data", 1);
check_ls("/api/users/spiral_tester/data/test_a", 1);
check_ls("/api/users/spiral_tester/data/test_a/my_dir", 1);
check_ls("/api/users/spiral_tester/data/test_a/my_dir/mo_dir", 0);
dataset_path p_dir("/api/users/spiral_tester/data/test_a");
dataset_path p_my_dir(p_dir.append("my_dir"));
ASSERT_EQ(p_dir.exists(), path::e_directory);
ASSERT_EQ(p_my_dir.exists(), path::e_directory);
ASSERT_NO_THROW(p.remove());
check_ls("/api/users/spiral_tester/data", 1);
check_ls("/api/users/spiral_tester/data/test_a", 1);
check_ls("/api/users/spiral_tester/data/test_a/my_dir", 0);
check_ls("/api/users/spiral_tester/data/test_a/my_dir/mo_dir", 0);
ASSERT_EQ(p_my_dir.exists(), path::e_directory);
ASSERT_NO_THROW(p_my_dir.remove());
check_ls("/api/users/spiral_tester/data", 1);
check_ls("/api/users/spiral_tester/data/test_a", 0);
check_ls("/api/users/spiral_tester/data/test_a/my_dir", 0);
check_ls("/api/users/spiral_tester/data/test_a/my_dir/mo_dir", 0);
ASSERT_NO_THROW(p_dir.remove());
check_ls("/api/users/spiral_tester/data", 0);
check_ls("/api/users/spiral_tester/data/test_a", 0);
check_ls("/api/users/spiral_tester/data/test_a/my_dir", 0);
check_ls("/api/users/spiral_tester/data/test_a/my_dir/mo_dir", 0);
ASSERT_EQ(p_dir.exists(), path::e_no_exist);
ASSERT_EQ(p_my_dir.exists(), path::e_no_exist);
ASSERT_EQ(p.exists(), path::e_no_exist);
dataset_path p2("/api/users/spiral_tester/data/my_file");
dataset_meta dm;
dm.type = datatype_registry::find("unaligned_reads");
ASSERT_NO_THROW(p2.remove());
ASSERT_NO_THROW(p2.create(dm));
ASSERT_THROW(p2.mkdir(), io_exception);
dataset_path p3(p2.append("dir3"));
ASSERT_ANY_THROW(p3.mkdir());
dataset_path p4("/api/users/spiral_tester/data");
std::vector<direntry> listing = p4.list_dir();
ASSERT_EQ(listing.size(), (size_t)1);
if (listing.size() == 1) {
ASSERT_EQ(listing[0]._id, "/api/users/spiral_tester/data/my_file");
}
dataset_path p5("/api/users/spiral_tester/data/my_dir");
ASSERT_NO_THROW(p5.mkdir());
listing = p4.list_dir();
ASSERT_EQ(listing.size(), (size_t)2);
if (listing.size() == 2) {
ASSERT_EQ(listing[0]._id, "/api/users/spiral_tester/data/my_dir");
ASSERT_EQ(listing[1]._id, "/api/users/spiral_tester/data/my_file");
}
auto foo = p5.append("foo");
foo.mkdir();
ASSERT_NO_THROW( p4.remove(true) );
ASSERT_EQ( path::e_no_exist, foo.exists() );
ASSERT_EQ( path::e_no_exist, p2.exists() );
ASSERT_EQ( path::e_no_exist, p5.exists() );
ASSERT_EQ( path::e_no_exist, p4.exists() );
}
|
SELECT `FEE`,`FFEE` FROM fee WHERE `DTYPE` = "ItemFee" and `FEE` != `FFEE`;
-- UPDATE fee SET `FFEE`=`FEE` Where `ID`>0 and `DTYPE` = "ItemFee"; |
import 'package:equations/equations.dart';
void main() {
// f(x) = x^3 + 7x^2 + 2x - 5
final cubic = Cubic.realEquation(
b: 7,
c: 2,
d: -5,
);
print('$cubic'); // f(x) = 1x^3 + 7x^2 + 2x + -5
print('discriminant: ${cubic.discriminant()}'); // 5089
print('derivative: ${cubic.derivative()}'); // 3x^2 + 14x + 2
print('degree: ${cubic.degree}'); // 3
print('valid input? ${cubic.isValid}'); // true
print('are all coefficients real? ${cubic.isRealEquation}\n'); // true
for (final sol in cubic.solutions()) {
print(' > x = $sol');
}
print('\n ============ \n');
// f(x) = ix^2 + (8 - 3i)
final quadratic = Algebraic.from(const [
Complex.i(),
Complex.zero(),
Complex(8, -3),
]);
print('$quadratic'); // f(x) = 1ix^2 + (8 - 3i)
print('discriminant: ${quadratic.discriminant()}'); // -12 - 32i
print('derivative: ${quadratic.derivative()}'); // 2ix
print('degree: ${quadratic.degree}'); // 2
print('valid input? ${quadratic.isValid}'); // true
print('are all coefficients real? ${quadratic.isRealEquation}\n'); // false
for (final sol in quadratic.solutions()) {
print(' > x = $sol');
}
}
|
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using Google.Apis.Logging;
using Google.Cloud.PubSub.V1;
using Google.Cloud.Translation.V2;
using static Google.Apis.Http.ConfigurableMessageHandler;
using Grpc.Core;
using System;
using Google.Api.Gax.ResourceNames;
using Google.Cloud.Vision.V1;
using System.Collections.Generic;
using Google.Cloud.Scheduler.V1;
// Unfortunately Scheduler now has a TopicName class too; this is just an unfortunate combination of APIs
// to pick snippets from.
using TopicName = Google.Cloud.PubSub.V1.TopicName;
namespace Google.Cloud.Tools.Snippets
{
public class FaqSnippets
{
public void Emulator()
{
// Sample: Emulator
// [START pubsub_use_emulator]
// For example, "localhost:8615"
string emulatorHostAndPort = Environment.GetEnvironmentVariable("PUBSUB_EMULATOR_HOST");
Channel channel = new Channel(emulatorHostAndPort, ChannelCredentials.Insecure);
PublisherServiceApiClient client = PublisherServiceApiClient.Create(channel);
client.CreateTopic(new TopicName("project", "topic"));
foreach (var topic in client.ListTopics(new ProjectName("project")))
{
Console.WriteLine(topic.Name);
}
// [END pubsub_use_emulator]
// End sample
}
public void RestLogging()
{
// Sample: RestLogging
// Required using directives:
// using static Google.Apis.Http.ConfigurableMessageHandler;
// using Google.Apis.Logging;
// using Google.Cloud.Translation.V2;
// Register a verbose console logger
ApplicationContext.RegisterLogger(new ConsoleLogger(LogLevel.All));
// Create a translation client
TranslationClient client = TranslationClient.Create();
// Configure which events the message handler will log.
client.Service.HttpClient.MessageHandler.LogEvents =
LogEventType.RequestHeaders | LogEventType.ResponseBody;
// Make the request
client.ListLanguages();
// End sample
}
public void ProtoRepeatedField1()
{
// Sample: ProtoRepeatedField1
// In normal code you'd populate these individual requests with more
// information.
AnnotateImageRequest request1 = new AnnotateImageRequest();
AnnotateImageRequest request2 = new AnnotateImageRequest();
// Create the batch request using an object initializer
BatchAnnotateImagesRequest batch = new BatchAnnotateImagesRequest
{
// Populate the repeated field with a collection initializer
Requests = { request1, request2 }
};
// End sample
}
public void ProtoRepeatedField2()
{
// Sample: ProtoRepeatedField2
// In normal code you'd populate these individual requests with more
// information.
AnnotateImageRequest request1 = new AnnotateImageRequest();
AnnotateImageRequest request2 = new AnnotateImageRequest();
// Populate the batch without using an object initializer, just by calling
// Add on the repeated field
BatchAnnotateImagesRequest batch = new BatchAnnotateImagesRequest();
batch.Requests.Add(request1);
batch.Requests.Add(request2);
// End sample
}
public void ProtoRepeatedField3()
{
// Sample: ProtoRepeatedField3
// In normal code you'd populate these individual requests with more
// information.
List<AnnotateImageRequest> requests = new List<AnnotateImageRequest>
{
new AnnotateImageRequest(),
new AnnotateImageRequest()
};
// Create the batch request using an object initializer
BatchAnnotateImagesRequest batch = new BatchAnnotateImagesRequest
{
// Populate the repeated field using the Add overload that accepts
// an IEnumerable<T>
Requests = { requests }
};
// End sample
}
public void ProtoMap1()
{
// Sample: ProtoMap1
HttpTarget target = new HttpTarget
{
Headers =
{
{ "X-Custom-Header1", "Value1" },
{ "X-Custom-Header2", "Value2" },
}
};
// End sample
}
public void ProtoMap2()
{
// Sample: ProtoMap2
HttpTarget target = new HttpTarget
{
Headers =
{
["X-Custom-Header1"] = "Value1",
["X-Custom-Header2"] = "Value2",
}
};
// End sample
}
public void ProtoMap3()
{
// Sample: ProtoMap3
HttpTarget target = new HttpTarget();
target.Headers["X-Custom-Header1"] = "Value1";
target.Headers["X-Custom-Header2"] = "Value2";
// End sample
}
}
}
|
using Tabben: num_examples, num_inputs, num_outputs, num_classes, task,
has_extras, has_extra, license, bibtex
@testset "classification dataset tests" begin
ns = [5, 10, 17, 100, 1000]
ms = [5, 10, 17, 100, 1000, 5000]
ks = [1, 2, 3, 5, 10]
cs = [2, 3, 5, 10, 100]
for n in ns, m in ms, k in ks, c in cs
ds = random_classification_dataset(n, m, k, c)
@test num_examples(ds) == n == length(ds) == lastindex(ds)
@test num_inputs(ds) == m
@test num_outputs(ds) == k
@test num_classes(ds) == c
@test task(ds) == "classification"
@test has_extras(ds)
for extra_name in ("license", "bibtex", "column-names-attributes", "column-names-target")
@test has_extra(ds, extra_name)
end
@test !has_extra(ds, "other")
@test !has_extra(ds, "blah")
@test license(ds) == "test license"
@test bibtex(ds) == "test bibtex"
count = 0
for i in 1:length(ds)
single_input, single_output = ds[i]
@test length(single_input) == m
@test length(single_output) == k
@test all(val in 0:c-1 for val in single_output)
count += 1
end
@test count == n
count = 0
for (single_input, single_output) in ds
@test length(single_input) == m
@test length(single_output) == k
@test all(val in 0:c-1 for val in single_output)
count += 1
end
@test count == n
end
end
|
import { mapGroup } from '@givto/api/graphql-mappers';
import { Group, ResolverObject, User } from '../../graphql-schema';
export const userResolver: ResolverObject<User> = {
async groups(user, _, { dataSources: { groups } }): Promise<Group[]> {
console.log('resolve groups for user', user.email);
const mongoGroups = await groups.findByIds(user.groups);
return mongoGroups.map((group) => mapGroup(group, user.id));
},
};
|
using System;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
namespace NRealbit.Scripting.Parser
{
internal class StringInput : IInput<char>
{
public StringInput(string source) : this(source, 0) { }
public string Source { get; }
public int Position { get; }
internal StringInput(string source, int position)
{
if (source == null)
throw new ArgumentNullException(nameof(source));
Source = source;
Position = position;
Memos = new Dictionary<object, object>();
}
public bool AtEnd { get { return Position == Source.Length; } }
public char GetCurrent() => Source[Position];
public IInput<char> Advance()
{
if (AtEnd)
throw new InvalidOperationException("The input is already at the end of the source");
return new StringInput(Source, Position + 1);
}
public IEnumerator<char> GetEnumerator()
{
var arr = (IEnumerable<char>)(Source).ToCharArray();
return arr.GetEnumerator();
}
IEnumerator IEnumerable.GetEnumerator()
{
return (Source).ToCharArray().GetEnumerator();
}
public IDictionary<object, object> Memos { get; }
}
} |
# Introduction to incisor-cell-counter
Processing pipeline was invoked like:
lein run -m incisor-cell-counter.imaris-cropped-analysis "/home/timepoint/directory" "imagefilename";
lein run -m incisor-cell-counter.csv-generator "/home/timepoint/directory" "imagefilename"
|
// Copyright 2021 The Terasology Foundation
// SPDX-License-Identifier: Apache-2.0
package org.terasology.persistence.typeHandling.coreTypes;
import com.google.common.collect.Maps;
import org.terasology.persistence.typeHandling.PersistedData;
import org.terasology.persistence.typeHandling.PersistedDataSerializer;
import org.terasology.persistence.typeHandling.TypeHandler;
import java.util.Map;
import java.util.Optional;
public class StringMapTypeHandler<T> extends TypeHandler<Map<String, T>> {
private TypeHandler<T> contentsHandler;
public StringMapTypeHandler(TypeHandler<T> contentsHandler) {
this.contentsHandler = contentsHandler;
}
@Override
public PersistedData serializeNonNull(Map<String, T> value, PersistedDataSerializer serializer) {
Map<String, PersistedData> map = Maps.newLinkedHashMap();
for (Map.Entry<String, T> entry : value.entrySet()) {
PersistedData item = contentsHandler.serialize(entry.getValue(), serializer);
if (!item.isNull()) {
map.put(entry.getKey(), item);
}
}
return serializer.serialize(map);
}
@Override
public Optional<Map<String, T>> deserialize(PersistedData data) {
if (!data.isValueMap()) {
return Optional.empty();
}
Map<String, T> result = Maps.newLinkedHashMap();
for (Map.Entry<String, PersistedData> item : data.getAsValueMap().entrySet()) {
Optional<T> optionalValue = contentsHandler.deserialize(item.getValue());
optionalValue.ifPresent(value -> result.put(item.getKey(), value));
}
return Optional.of(result);
}
}
|
# `dank-scoop` bucket
## Usage
To add this bucket to scoop, run the following command in PowerShell:
```
scoop bucket add dank-scoop https://github.com/brian6932/dank-scoop/
```
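After the bucket is added, apps from it can be installed in the usual way (`<app>` is a placeholder for a manifest name in this bucket):
```
scoop install dank-scoop/<app>
```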
|
/*
* Copyright (c) 2011-2018 Contributors to the Eclipse Foundation
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License 2.0 which is available at
* http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
* which is available at https://www.apache.org/licenses/LICENSE-2.0.
*
* SPDX-License-Identifier: EPL-2.0 OR Apache-2.0
*/
package io.vertx.ext.web.client;
import io.vertx.codegen.annotations.Fluent;
import io.vertx.codegen.annotations.GenIgnore;
import io.vertx.codegen.annotations.VertxGen;
import io.vertx.ext.web.client.impl.WebClientSessionAware;
import io.vertx.ext.web.client.spi.CookieStore;
/**
* An asynchronous session-aware HTTP / HTTP/2 client called {@code WebClientSession}.
* <p>
* This client wraps a {@link WebClient} and makes it session aware, adding the following features:
* <ul>
* <li>Per client headers, to be sent with every request</li>
* <li>Per client cookies, to be sent with every request</li>
* <li>Automatic storage and sending of cookies received from the server(s)</li>
* </ul>
* <p>
* The client honors the following cookie attributes:
* <ul>
* <li>domain</li>
* <li>path</li>
* <li>secure</li>
* <li>max-age and expires</li>
* </ul>
* <p/>
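* A minimal usage sketch (assuming an existing {@code Vertx} instance named {@code vertx}):
* <pre>{@code
* WebClient webClient = WebClient.create(vertx);
* WebClientSession session = WebClientSession.create(webClient);
* session.addHeader("X-App-Token", "abc"); // sent with every subsequent request
* }</pre>
* <p/>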
*
* @author <a href="mailto:[email protected]">Tommaso Nolli</a>
*/
@VertxGen
public interface WebClientSession extends WebClient {
/**
* Create a session aware web client using the provided {@code webClient} instance.
*
* @param webClient the web client instance
* @return the created client
*/
static WebClientSession create(WebClient webClient) {
return create(webClient, CookieStore.build());
}
/**
* Create a session aware web client using the provided {@code webClient} instance
* and {@code cookieStore}.
*
* @param webClient the web client instance
* @param cookieStore the cookie store to use
* @return the created client
*/
@GenIgnore
static WebClientSession create(WebClient webClient, CookieStore cookieStore) {
return new WebClientSessionAware(webClient, cookieStore);
}
/**
* Configure the client to add an HTTP header to every request.
*
* @param name the header name
* @param value the header value
* @return a reference to this, so the API can be used fluently
*/
@Fluent
@GenIgnore
WebClientSession addHeader(CharSequence name, CharSequence value);
/**
* Configure the client to add an HTTP header to every request.
*
* @param name the header name
* @param value the header value
* @return a reference to this, so the API can be used fluently
*/
@Fluent
WebClientSession addHeader(String name, String value);
/**
* Configure the client to add an HTTP header to every request.
*
* @param name the header name
* @param values the header value
* @return a reference to this, so the API can be used fluently
*/
@Fluent
@GenIgnore
WebClientSession addHeader(CharSequence name, Iterable<CharSequence> values);
/**
* Configure the client to add an HTTP header to every request.
*
* @param name the header name
* @param values the header value
* @return a reference to this, so the API can be used fluently
*/
@Fluent
@GenIgnore
WebClientSession addHeader(String name, Iterable<String> values);
/**
* Removes a previously added header.
*
* @param name the header name
* @return a reference to this, so the API can be used fluently
*/
@Fluent
@GenIgnore
WebClientSession removeHeader(CharSequence name);
/**
* Removes a previously added header.
*
* @param name the header name
* @return a reference to this, so the API can be used fluently
*/
@Fluent
WebClientSession removeHeader(String name);
/**
* Returns this client's {@code CookieStore}
* <p>
* All cookies added to this store will be sent with every request.
* The CookieStore honors the domain, path, secure and max-age properties of received cookies
* and is automatically updated with cookies present in responses received by this client.
* @return this client's cookie store
*/
@GenIgnore
CookieStore cookieStore();
}
|
#!/bin/bash
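# Installs Miniconda (if missing) and creates a conda environment with GATK4
# and its Python dependencies.
# Usage: bash <this-script> [conda-env-name]   # env name defaults to "gatk4"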
CONDA_ENVNAME=${1:-'gatk4'}
CONDA_INSTALLER=Miniconda3-latest-Linux-x86_64.sh
CONDA_DOWNLOAD_URL=https://repo.anaconda.com/miniconda/${CONDA_INSTALLER}
GATK_VERSION="4.1.4.0"
GATK_TARBALL="gatk-${GATK_VERSION}.zip"
GATK_DOWNLOAD_BASEURL="https://github.com/broadinstitute/gatk/releases/download"
GATK_DOWNLOAD_URL="${GATK_DOWNLOAD_BASEURL}/${GATK_VERSION}/${GATK_TARBALL}"
GATK_REQUIRES="matplotlib pandas bleach patsy pysam pymc3 tqdm"
GATK_PACKAGES="gatk4=${GATK_VERSION} pyvcf keras scikit-learn theano bwa samtools htslib"
[ -f ${HOME}/${CONDA_INSTALLER} ] || {
wget -O ${HOME}/${CONDA_INSTALLER} ${CONDA_DOWNLOAD_URL}
}
[ -d ${HOME}/conda ] || {
bash ${HOME}/${CONDA_INSTALLER} -b -p ${HOME}/conda
}
export PATH=${HOME}/conda/condabin:${PATH}
[ -f ${HOME}/${GATK_TARBALL} ] || {
pushd ${HOME}
wget -nv ${GATK_DOWNLOAD_URL}
unzip ${GATK_TARBALL}
popd
}
[ -d ${HOME}/conda/envs/${CONDA_ENVNAME} ] || {
source $HOME/conda/etc/profile.d/conda.sh
conda create -y -n ${CONDA_ENVNAME} python=3.6 pip
conda activate ${CONDA_ENVNAME}
conda install -y -c bioconda ${GATK_PACKAGES} ${GATK_REQUIRES}
pip install ${HOME}/gatk-${GATK_VERSION}/gatkPythonPackageArchive.zip
conda deactivate
}
|
module GraphqlPlayground
class PlaygroundsController < ActionController::Base
def show
end
end
end
|
# frozen_string_literal: true
module PhcStringFormat
#
# Parser for parsing PHC-string-format.
#
class PhcString
include Validations
def self.parse(string)
string ||= ''
PhcString.new(*split(string))
rescue StandardError => e
raise ParseError, e.message
end
# :reek:DuplicateMethodCall { allow_calls: ['elements.shift', 'elements.first'] }
def self.split(string)
elements = string.split(/\$/, 6)
elements.shift
[
elements.shift,
(elements.shift if (elements.first || '').start_with?('v=')),
(elements.shift if (elements.first || '').include?('=')),
elements.shift,
elements.shift
]
end
def self.create(id:, version: nil, params: nil, salt: nil, hash: nil, hint: {})
PhcString.new \
id,
(Parameters.to_s(v: version) if version),
(Parameters.to_s(params) if params),
hint.dig(:salt, :encoding) == '7bit' ? salt : B64.encode(salt),
B64.encode(hash)
end
private_class_method :split
validates :@id, message: 'id is non-compliant', format: { with: /\A[a-z0-9-]{1,32}\z/ }
validates \
:@version_string,
message: 'version is non-compliant',
allow_nil: true,
format: { with: /\Av=\d+\z/ }
validate :validate_params_string, message: 'parameters is non-compliant'
validates \
:@encoded_salt,
message: 'encoded salt is non-compliant',
allow_nil: true,
format: { with: %r{\A[a-zA-Z0-9/+.-]*\z} }
validates \
:@encoded_hash,
message: 'encoded hash is non-compliant',
allow_nil: true,
format: { with: %r{\A[a-zA-Z0-9/+]*\z} }
validate :validate_salt_and_hash, message: 'hash needs salt'
def initialize(id, version_string, params_string, encoded_salt, encoded_hash)
@id = id
@version_string = version_string
@params_string = params_string
@encoded_salt = encoded_salt
@encoded_hash = encoded_hash
self.class.do_validate self
end
def to_s
"$#{[
@id,
@version_string,
@params_string,
@encoded_salt,
@encoded_hash
].compact.join('$')}"
end
# rubocop:disable Metrics/PerceivedComplexity, Metrics/CyclomaticComplexity
def to_h(pick: nil, hint: {})
pick ||= %i[id version params salt hash]
{
id: (@id if pick.include?(:id)),
version: (Parameters.to_h(@version_string)['v'] if pick.include?(:version)),
params: (Parameters.to_h(@params_string) if pick.include?(:params)),
salt:
if pick.include?(:salt)
hint.dig(:salt, :encoding) == '7bit' ? @encoded_salt : B64.decode(@encoded_salt)
end,
hash: (B64.decode(@encoded_hash) if pick.include?(:hash))
}.select { |_, value| value }
end
# rubocop:enable Metrics/PerceivedComplexity, Metrics/CyclomaticComplexity
def ==(other)
instance_variable_values = other.instance_variables.map { |name| other.instance_variable_get(name) }
instance_variable_values == instance_variables.map { |name| instance_variable_get(name) }
end
private
def validate_params_string
!@params_string || !@params_string.empty? && @params_string.split(',').all? \
{ |param| param =~ %r{\A[a-z0-9-]{1,32}=[a-zA-Z0-9/+.-]+\z} }
end
def validate_salt_and_hash
@encoded_salt || !@encoded_hash
end
#
# PHC string parameters
#
module Parameters
def self.to_s(params)
params ||= {}
params.map { |param| param.join '=' }.join(',')
end
def self.to_h(params_string)
params_string ||= ''
params_string # rubocop:disable Style/HashTransformValues
.split(/,/)
.map { |param| param.split '=' }
.map { |name, value| [name, value =~ /\A-?\d+(\.\d+)?\Z/ ? value.to_i : value] }
.to_h
end
end
end
#
# This exception is raised if a parser error occurs.
#
class ParseError < StandardError; end
end
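# A hedged usage sketch (the PHC string below is illustrative, not a real hash):
#
#   phc = PhcStringFormat::PhcString.parse('$argon2id$v=19$m=65536,t=3,p=1$c29tZXNhbHQ$aGFzaA')
#   phc.to_h(pick: %i[id version params])
#   # => { id: 'argon2id', version: 19, params: { 'm' => 65536, 't' => 3, 'p' => 1 } }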
|
use strict;
use warnings;
use Test::More;
use lib qw( t/lib );
use DBICTest;
use DBICTest::Constants qw/ THROW_EXCEPTION_MESSAGE /;
BEGIN {
eval "use DBD::SQLite";
plan $@
? ( skip_all => 'needs DBD::SQLite for testing' )
: ( tests => 684 );
}
## slave
my $schema = DBICTest->init_schema;
my $message = THROW_EXCEPTION_MESSAGE;
my $itr_s_artist = $schema->resultset('Artist::Slave')->search;
while ( my $s_artist = $itr_s_artist->next ) {
is($s_artist->is_slave,1,'slave artist "delete"');
eval{$s_artist->delete};
like($@,qr/$message/,'slave artist "delete"');
}
my $itr_s_cd = $schema->resultset('CD::Slave')->search;
while ( my $s_cd = $itr_s_cd->next ) {
is($s_cd->is_slave,1,'slave cd "delete"');
eval{$s_cd->delete};
like($@,qr/$message/,'slave cd "delete"');
}
my $itr_s_track = $schema->resultset('Track::Slave')->search;
while ( my $s_track = $itr_s_track->next ) {
is($s_track->is_slave,1,'slave track "delete"');
eval{$s_track->delete};
like($@,qr/$message/,'slave track "delete"');
}
## master
my $itr_m_artist = $schema->resultset('Artist')->search;
while ( my $m_artist = $itr_m_artist->next ) {
is($m_artist->is_slave,0,'master artist "delete"');
$m_artist->delete;
}
my $itr_m_artist_deleted = $schema->resultset('Artist')->search;
is($itr_m_artist_deleted->first,undef,'master artist "delete"');
my $itr_m_cd = $schema->resultset('CD')->search;
while ( my $m_cd = $itr_m_cd->next ) {
is($m_cd->is_slave,0,'master cd "delete"');
$m_cd->delete;
}
my $itr_m_cd_deleted = $schema->resultset('CD')->search;
is($itr_m_cd_deleted->first,undef,'master cd "delete"');
my $itr_m_track = $schema->resultset('Track')->search;
while ( my $m_track = $itr_m_track->next ) {
is($m_track->is_slave,0,'master track "delete"');
$m_track->delete;
}
my $itr_m_track_deleted = $schema->resultset('Track')->search;
is($itr_m_track_deleted->first,undef,'master track "delete"');
|
from HDUCoursesAPI.timetable import dict_course_start, dict_course_end
import json
import re
def make_json(data: list) -> list:
need_deserialization = ['time_info', 'week_info', 'location', 'other']
for one in data:
for i in need_deserialization:
one[i] = one[i].replace("'", "\"")
one[i] = one[i].replace("\\xa0", '')
one[i] = json.loads(one[i])
return data
# Check whether a number is even
def is_even(num):
if num % 2 == 0:
return True
return False
def parse_week(time_info: str, start_end: str) -> dict:
week_pattern = re.compile(r'{[^}]+}')
data = {
'start': 0,
'end': 0,
'flag': 0
}
if time_info != "" and time_info != "\xa0" and start_end != "" and start_end != "\xa0":
start, end = map(int, start_end.split("-"))
info = re.search(week_pattern, time_info).group()
single = re.search('ๅๅจ', info)  # "odd weeks"
double = re.search('ๅๅจ', info)  # "even weeks"
if single is not None:
data['flag'] = 1
if is_even(start):
start += 1
if is_even(end):
end -= 1
elif double is not None:
data['flag'] = 2
if not is_even(start):
start += 1
if not is_even(end):
end -= 1
data['start'] = start
data['end'] = end
return data
def parse_time(time_info: str, location_info: str) -> list[dict]:
times = time_info.split(";")
locations = location_info.split(";")
result = []
for i, item in enumerate(times):
regex = re.compile(r'็ฌฌ(.{0,8})่')  # matches "็ฌฌX่" class-period markers, e.g. "็ฌฌ1,2่"
regex_result = regex.findall(item)
course_period_list = []
for one in regex_result:
course_period_list.extend(one.split(','))
course_period_num = len(course_period_list)
if course_period_num != 0:
course_period_start = int(course_period_list[0])
course_period_end = int(course_period_list[course_period_num - 1])
one = {
'weekday': item[0:2],
'start': dict_course_start[course_period_start].strftime('%H:%M'),
'end': dict_course_end[course_period_end].strftime('%H:%M'),
'location': locations[i]
}
else:
one = {}
result.append(one)
return result
def parse_location(location_info: str) -> list:
# str.replace returns a new string, so the result must be assigned back
location_info = location_info.replace('\\xa0', '')
locations = location_info.split(";")
return list(set(locations))
def parse_other(other_info: str) -> list:
return other_info.split(",")
|
const AbstractIndexer = require("../abstract_indexer");
const crypto = require("crypto");
class StatusIndexer extends AbstractIndexer {
get LOGGER_NAME() {
return 'status-indexer';
}
constructor(adapter, params) {
super(adapter, params);
}
indexStatus (reporter) {
const idHash = crypto.createHash('md5')
.update(JSON.stringify(reporter.report), 'utf-8')
.digest('hex');
reporter.report.timestamp = (new Date()).toString();
return this.indexDocument({
"index": this.esIndex,
"type": this.esType,
"id": idHash,
"body": JSON.stringify(reporter.report)
});
}
}
module.exports = StatusIndexer;
|
package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/signal"
"path/filepath"
"strings"
"time"
"github.com/gorilla/mux"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
"github.com/ezodude/kube-guard/privilege"
)
func newK8s() (kubernetes.Interface, error) {
home := homedir.HomeDir()
if home == "" {
return nil, fmt.Errorf("Could not find HOME directory")
}
path := filepath.Join(home, ".kube", "config")
config, err := clientcmd.BuildConfigFromFlags("", path)
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(config)
}
type searchPayload struct {
Subjects []string `json:"subjects"`
Format string `json:"format"`
}
type app struct {
router *mux.Router
k8s kubernetes.Interface
}
func (a *app) initialize() {
log.Println("App initializing")
a.router = mux.NewRouter()
a.router.HandleFunc("/api/v0.1/privilege/search", a.searchHandler).
Methods("GET").
Headers("Content-Type", "application/json")
}
func (a *app) searchHandler(w http.ResponseWriter, r *http.Request) {
log.Println("Handler searchHandler request received")
data, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
log.Printf("%s status [%d]: %s\n", r.RequestURI, http.StatusInternalServerError, err.Error())
return
}
defer r.Body.Close()
var payload searchPayload
err = json.Unmarshal(data, &payload)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
log.Printf("%s status [%d]: %s\n", r.RequestURI, http.StatusInternalServerError, err.Error())
return
}
log.Printf("Handler searchHandler payload:[%#v]", payload)
res, err := privilege.NewQuery().
Client(a.k8s).
Subjects(payload.Subjects).
ResultFormat(payload.Format).
Do()
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
log.Printf("%s status [%d]: %s\n", r.RequestURI, http.StatusInternalServerError, err.Error())
return
}
// Set Content-Type before WriteHeader: headers written after the status
// line has been sent are silently ignored.
switch strings.ToLower(payload.Format) {
case "yaml", "yml":
w.Header().Set("Content-Type", "application/x-yaml")
default:
w.Header().Set("Content-Type", "application/json")
}
w.WriteHeader(http.StatusOK)
log.Printf("%s status [%d]\n", r.RequestURI, http.StatusOK)
w.Write(res)
}
func (a *app) run(port string) {
addr := fmt.Sprintf(":%s", port)
srv := &http.Server{
Addr: addr,
// Good practice to set timeouts to avoid Slowloris attacks.
WriteTimeout: time.Second * 15,
ReadTimeout: time.Second * 15,
IdleTimeout: time.Second * 60,
Handler: a.router,
}
// Run our server in a goroutine so that it doesn't block.
go func() {
log.Printf("Running server on %s\n", addr)
if err := srv.ListenAndServe(); err != nil {
log.Println(err)
}
}()
c := make(chan os.Signal, 1)
// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
// SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
signal.Notify(c, os.Interrupt)
// Block until we receive our signal.
<-c
// Create a deadline to wait for.
ctx, cancel := context.WithTimeout(context.Background(), time.Second*15)
defer cancel()
// Doesn't block if no connections, but will otherwise wait
// until the timeout deadline.
srv.Shutdown(ctx)
log.Println("shutting down")
os.Exit(0)
}
func main() {
k8s, err := newK8s()
if err != nil {
panic(err)
}
a := app{}
a.k8s = k8s
a.initialize()
a.run("8080")
}
|
@extends('site.layouts.default')
@section('content')
@section('test')
<h1>{{ $title }}</h1>
<div>{{ link_to('post/create', 'ๆฐๅข') }}</div> {{-- 'ๆฐๅข' = "Add new" --}}
@if (isset($posts))
<ol>
@foreach ($posts as $post)
<li>
{{-- HtmL::linkRoute('post.show', $post->title, ['id'=>$post->id]) --}}
{{ Html::link('post/'.$post->id, $post->title) }}
({{ Html::link('post/'.$post->id.'/edit', '็ทจ่ผฏ') }})</li> {{-- '็ทจ่ผฏ' = "Edit" --}}
@endforeach
</ol>
@endif
@stop |
namespace Rikrop.Core.Framework.Logging
{
public static class LogRecordDataTypes
{
public const string Simple = "Simple";
public const string Large = "Large";
public const string StackTrace = "StackTrace";
public const string Exception = "Exception";
}
} |
using System;
using System.Reactive.Disposables;
using System.Reactive.Linq;
using System.Timers;
using Serilog;
namespace CreateTestObservable
{
class Program
{
static void Main(string[] args)
{
Log.Logger = new LoggerConfiguration().WriteTo.Console().CreateLogger();
var ob = Observable.Create<DateTime>(
observer =>
{
var tim = new Timer();
tim.Interval = TimeSpan.FromSeconds(5).TotalMilliseconds;
// Publish clock ticks
tim.Elapsed += (s, e) => observer.OnNext(e.SignalTime);
tim.Start();
return tim;
});
var subscription = ob.Subscribe(val => Log.Information("{val:hh:mm ss tt} was published", val));
Console.Read();
subscription.Dispose();
}
}
} |
using AppRopio.Payments.Core.Bundle;
namespace AppRopio.Payments.CloudPayments.Core.Services
{
public interface ICloudPaymentsPaymentNavigationVmService
{
void NavigateToInAppPayment(PaymentOrderBundle bundle);
}
} |
require 'codtls/record'
require 'codtls/session'
require 'openssl/ccm'
require 'codtls/alert'
module CoDTLS
# TODO
module RecordLayer
# first dtls message will be removed from mesg, so u can call parse
# multiple times on a concatenation of many dtls records
def self.decrypt(packet, maxlen)
# packet = mesg, (address_family, port, hostname, numeric_address)
mesg, sender_inet_addr = packet
begin
record, data = Record.parse(mesg)
rescue RecordError
send_alert(sender_inet_addr, :fatal, :decode_error)
return ['', sender_inet_addr]
end
session = RedisSession.new(sender_inet_addr[3])
unless session.check_seq(record.seq_num)
send_alert(sender_inet_addr, :fatal, :decode_error)
return ['', sender_inet_addr]
end
if record.epoch > 0
keyblock = session.key_block
if keyblock.empty?
send_alert(sender_inet_addr, :fatal, :decode_error)
return ['', sender_inet_addr]
end
ccm = OpenSSL::CCM.new('AES', keyblock[16...32], 8)
data = ccm.decrypt(data,
record.nonce(keyblock[36...40]),
record.additional_data(data.length - 8))
if data.empty?
send_alert(sender_inet_addr, :fatal, :bad_record_mac)
return ['', sender_inet_addr]
end
else
if session.epoch > 0
# When Epoch > 0 is known, message in epoch 0 isnt acceptable
send_alert(sender_inet_addr, :fatal, :unexpected_message)
return ['', sender_inet_addr]
end
# WARNING: !!! -> disabled for testing purpose
# if record.type == :appdata
# send_alert(sender_inet_addr, :fatal, :unexpected_message)
# return ['', sender_inet_addr]
# end
end
if record.type == :alert
session.clear
return ['', sender_inet_addr]
end
session.seq = record.seq_num
[data[0...maxlen], sender_inet_addr]
end
def self.send_alert(sender_inet_addr, lvl, desc)
e = encrypt(Alert.new(lvl, desc).to_wire, sender_inet_addr[3], :alert)
s = UDPSocket.new(sender_inet_addr[0])
s.send(e, 0, sender_inet_addr[3], sender_inet_addr[1])
end
end
end
|
import matplotlib.pyplot as plt
class Segment:
"""
Each piece of the fractal is one 'segment', whether it's a single line (at the base) or made up
of smaller segments.
line -- A 2D array of colummn vectors representing points on a grid. The two points together
represent one line segment.
"""
def __init__(self, line):
self.line = line
self.segments = []
self.straight = True
if line[1][0] == line[1][1]:
self.horizontal = True
else:
self.horizontal = False
def fractalize(self):
"""
Breaks up the segment into eight smaller lines if the segment is a straight line. If the segment
is made up of segments already, then .fractalize is called on each segment. So, everytime
.fractalize is called on the main_segment/orginal line, it breaks up each smallest line in the
fractal into eight smaller lines.
"""
# Get the endpoints of the segments line to calculate the endpoints of each smaller segment
x1 = self.line[0][0]
x2 = self.line[0][1]
y1 = self.line[1][0]
y2 = self.line[1][1]
if self.straight:
if self.horizontal:
# ul is the length of one the smaller lines that is being created.
ul = (x2-x1) / 4
x1_ul = x1 + ul
x2__ul = x2 - ul
x1_2ul = x1 + ul + ul
y1_ul = y1 + ul
y1__ul = y1 - ul
y2__ul = y2 - ul
seg1 = Segment( [[x1, x1_ul], [y1, y1]] )
seg2 = Segment( [[x1_ul, x1_ul], [y1, y1_ul]] )
seg3 = Segment( [[x1_ul, x1_2ul], [y1_ul, y1_ul]] )
seg4 = Segment( [[x1_2ul, x1_2ul], [y1_ul, y1]] )
seg5 = Segment( [[x1_2ul, x1_2ul], [y1, y1__ul]])
seg6 = Segment( [[x1_2ul, x2__ul], [y1__ul, y2__ul]] )
seg7 = Segment( [[x2__ul, x2__ul], [y2__ul, y2]] )
seg8 = Segment( [[x2__ul, x2], [y2, y2]] )
else:
ul = (y2-y1) / 4
x1_ul = x1 + ul
x2_ul = x2 + ul
x1__ul = x1 - ul
y1_ul = y1 + ul
y1__ul = y1 - ul
y1_2ul = y1 + ul + ul
y2__ul = y2 - ul
seg1 = Segment( [[x1, x1], [y1, y1_ul]] )
seg2 = Segment( [[x1, x1__ul], [y1_ul, y1_ul]] )
seg3 = Segment( [[x1__ul, x1__ul], [y1_ul, y1_2ul]] )
seg4 = Segment( [[x1__ul, x1], [y1_2ul, y1_2ul]] )
seg5 = Segment( [[x1, x1_ul], [y1_2ul, y1_2ul]] )
seg6 = Segment( [[x1_ul, x1_ul], [y1_2ul, y2__ul]] )
seg7 = Segment( [[x2_ul, x2], [y2__ul, y2__ul]] )
seg8 = Segment( [[x2, x2], [y2__ul, y2]] )
self.segments = [seg1, seg2, seg3, seg4, seg5, seg6, seg7, seg8]
self.straight = False
else:
for segment in self.segments:
segment.fractalize()
def plot_fractal(seg):
"""
Once the fractal is made to the desired number of iterations, plot_fractal recursively goes
through each segment and plots it if it's straight, and calls itself it again on each segment
if the it's not just a line.
"""
if seg.straight:
plt.plot(seg.line[0], seg.line[1], color=(0,0,0))
else:
for seg in seg.segments:
plot_fractal(seg)
def make_fractal(first_line, iterations):
"""Calls .fractalize on the original line segment a number of times equal to iterations."""
main_segment = Segment(first_line)
for i in range(iterations):
main_segment.fractalize()
return main_segment
first_line = [[0, 4], [1, 1]]
main_segment = make_fractal(first_line, 4)
plot_fractal(main_segment)
plt.show() |
delete from relacionamento_regra_de_negocio;
delete from relacionamento_referencia;
delete from relacionamento_informacao_complementar;
delete from relacionamento_dados_revisao;
delete from regra_de_negocio;
delete from referencia;
delete from informacao_complementar;
delete from revisao;
delete from sistema;
delete from passos;
delete from fluxo;
delete from dados_revisao;
delete from caso_de_uso;
delete from ator; |
<?php
namespace App\Data\Repositories\Product;
use App\Product;
/**
* Class EloquentRepository
* @package App\Data\Repositories\Product
*/
class EloquentRepository implements ProductRepository
{
public function createProduct($attributes)
{
return Product::create($attributes);
}
public function getAllProducts()
{
return Product::all();
}
public function updateAProduct($product)
{
$product->save();
return $product;
}
public function deleteAProduct($product)
{
return $product->delete();
}
}
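// Example usage (illustrative; the attribute names are hypothetical):
//   $repo = new EloquentRepository();
//   $product = $repo->createProduct(['name' => 'Widget', 'price' => 100]);
//   $all = $repo->getAllProducts();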
|
import React, { useContext, useState } from "react"
import Img from "gatsby-image"
import { useStaticQuery, graphql } from "gatsby"
import { CartContext } from "../context"
import { motion } from "framer-motion"
const ProductItem = ({ node, index, width }) => {
const { handleAddCart } = useContext(CartContext)
const action = {
type: "ADD",
playload: {
product: node,
},
}
const image = useStaticQuery(graphql`
query borQuery {
allFile {
edges {
node {
name
childImageSharp {
fluid(maxHeight: 800) {
...GatsbyImageSharpFluid
originalName
}
}
}
}
}
}
`)
const imageUrl = (image, file) => {
const data = image.allFile.edges.find(
e => e.node.childImageSharp.fluid.originalName === file
)
return data.node.childImageSharp.fluid
}
const shadow =
"0 10px 15px -3px rgba(0, 0, 0, 0.1), 0 4px 6px -2px rgba(0, 0, 0, 0.05)"
const cardMotion = {
hover: {
boxShadow: shadow,
},
hidden: {
opacity: 0,
y: 200,
},
visible: {
opacity: 1,
y: 0,
transition: {
duration: 0.7,
type: "spring",
damping: 100,
mass: 5,
},
},
}
return (
<motion.li
whileTap="tap"
whileHover="hover"
variants={cardMotion}
className="relative col-span-1 overflow-hidden bg-white"
>
<div className="p-4">
<h2 className="pb-2 mb-2 text-2xl font-semibold tracking-wider border-b border-gray-300 font-display">
{node.name} 2019
<span className="block text-lg font-light tracking-tight text-gray-800 font-body">
200 Ft
</span>
</h2>
<div className="relative flex items-center justify-center">
<Img
className="relative w-full"
fluid={imageUrl(image, node.image)}
/>
</div>
</div>
<div className="flex flex-col items-center justify-center w-full">
<motion.button
className="px-8 py-4 m-4 tracking-wider text-gray-100 bg-gray-900 "
onClick={() => handleAddCart(action)}
>
            <motion.span className="inline-block" whileHover={{ scale: 1.05 }}>
              Add to cart
            </motion.span>
</motion.button>
</div>
</motion.li>
)
}
export default ProductItem
// SAFETY
// <div className="box-border flex flex-col items-center w-full my-2 sm:w-1/2 md:w-1/3 lg:w-1/4">
// <div className="relative flex flex-col items-center w-10/12 py-4 m-2 bg-gray-100 rounded shadow-md md:w-11/12">
// <Img className="w-64 h-full" fluid={imageUrl(image, node.image)} />
// <h2 className="text-2xl font-bold text-gray-900">{node.name}</h2>
// <h3 className="text-base text-gray-800 uppercase">
// {node.type} • {node.category}
// </h3>
// <div className="flex flex-col items-center w-full my-1">
// <p className="absolute top-0 right-0 p-2 m-2 text-base font-bold text-gray-900 bg-gray-200">
// {node.price}Ft/ {node.amount}l
// </p>
// <button
// className="px-4 py-2 text-xl font-bold text-green-100 bg-green-800 rounded"
// onClick={() => handleAddCart(action)}
// >
//           Add to cart
// </button>
// </div>
// </div>
// </div>
|
# -*- coding: utf-8 -*-
import io
import os
import pandas as pd
import scrapy
from scrapy import Request
from scrapy import signals
from fooltrader.contract import files_contract
from fooltrader.contract.data_contract import STOCK_META_COL
from fooltrader.utils.utils import to_time_str
class AmericaListSpider(scrapy.Spider):
name = "stock_list"
def start_requests(self):
yield Request(
url='http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=nasdaq&render=download',
meta={'exchange': 'nasdaq'},
callback=self.download_stock_list)
yield Request(
url='http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=nyse&render=download',
meta={'exchange': 'nyse'},
callback=self.download_stock_list)
yield Request(
url='http://www.nasdaq.com/screening/companies-by-name.aspx?letter=0&exchange=amex&render=download',
meta={'exchange': 'amex'},
callback=self.download_stock_list)
def download_stock_list(self, response):
exchange = response.meta['exchange']
path = files_contract.get_security_list_path('stock', exchange)
df = pd.read_csv(io.BytesIO(response.body), dtype=str)
if df is not None:
if os.path.exists(path):
df_current = pd.read_csv(path, dtype=str)
df_current = df_current.set_index('code', drop=False)
else:
df_current = pd.DataFrame()
df = df.loc[:, ['Symbol', 'Name', 'IPOyear', 'Sector', 'industry']]
df = df.dropna(subset=['Symbol', 'Name'])
df.columns = ['code', 'name', 'listDate', 'sector', 'industry']
df.listDate = df.listDate.apply(lambda x: to_time_str(x))
df['exchange'] = exchange
df['type'] = 'stock'
df['id'] = df[['type', 'exchange', 'code']].apply(lambda x: '_'.join(x.astype(str)), axis=1)
df['sinaIndustry'] = ''
df['sinaConcept'] = ''
df['sinaArea'] = ''
df['timestamp'] = df['listDate']
df = df.set_index('code', drop=False)
            # Only keep codes that are new relative to the saved list
            diff = set(df.index.tolist()) - set(df_current.index.tolist())
diff = [item for item in diff if item != 'nan']
if diff:
df_current = df_current.append(df.loc[diff, :], ignore_index=False)
df_current = df_current.loc[:, STOCK_META_COL]
df_current.columns = STOCK_META_COL
df_current.to_csv(path, index=False)
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
spider = super(AmericaListSpider, cls).from_crawler(crawler, *args, **kwargs)
crawler.signals.connect(spider.spider_closed, signal=signals.spider_closed)
return spider
def spider_closed(self, spider, reason):
spider.logger.info('Spider closed: %s,%s\n', spider.name, reason)
|
// Creates the Empresa object from its Schema
//@Ref http://mongoosejs.com/docs/index.html
var mongoose = require('mongoose');
var db = require('./db'); // Establishes the database connection
var empresaSchema = new mongoose.Schema({
_id: String,
ciudad: String,
pais: String
});
empresaSchema.methods.toString = function () {
return this._id + " ("+this.ciudad+", "+this.pais+")";
}
module.exports = mongoose.model('Empresa', empresaSchema);
|
package com.zhuwj.common.exception.handler;
import com.zhuwj.common.enums.ErrorCodeEnum;
import com.zhuwj.common.response.ResponseResult;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
/**
 * @description: Controller that handles exceptions raised by filters/interceptors
* @author: zhuwj
* @create: 2020-05-17 19:41
**/
@RestController
public class FilterExceptionHandler {
@RequestMapping("tokenExpiredException")
    public ResponseResult handleTokenExpiredException() {
return ResponseResult.error(ErrorCodeEnum.TOKEN_OVERDUE);
}
@RequestMapping("jwtVerificationException")
    public ResponseResult handleJwtVerificationException() {
return ResponseResult.error(ErrorCodeEnum.TOKEN_VALIDATE_FAIL);
}
}
|
<?php
namespace app\cms\controller;
use think\Db;
// use app\index\controller\Common;
class Index extends Common
{
public function index(){
$adminid = session('adminid');
// $admin = Db::name('admin')->where('admin_id', $adminid)->find();
$version = Db::query('SELECT VERSION() AS ver');
$admin = Db::name("admin")->where("admin_id", $adminid)->find();
$userArr["OS"] = PHP_OS;
$userArr["PV"] = PHP_VERSION;
$userArr["MV"] = $version[0]['ver'];
// pre($userArr);
$data = [
'admin' => $admin,
'userArr' => $userArr,
];
$this->assign($data);
// echo '123';
// pre( session('islog'));
return view();
}
}
|
(ns cook.queue-limit
  (:require [chime :as chime]
            [clj-time.core :as time]
[clojure.tools.logging :as log]
[cook.cached-queries :as cached-queries]
[cook.config :as config]
[cook.datomic :as datomic]
[cook.queries :as queries]
[cook.regexp-tools :as regexp-tools]
[cook.util :as util]
[datomic.api :as d]
[plumbing.core :as pc]
[metrics.timers :as timers]))
(defn- per-pool-config
"Returns the :per-pool section of the queue-limits config"
[]
(:per-pool (config/queue-limits)))
(defn- pool-global-threshold
"Returns the pool-global-threshold for the given pool,
the value at which the per-user queue limit switches
from the 'normal' number to the 'constrained' number"
[pool-name]
(regexp-tools/match-based-on-pool-name
(per-pool-config)
pool-name
:pool-global-threshold
:default-value Integer/MAX_VALUE))
(defn- user-limit-normal
"Returns the user-limit-normal for the given pool"
[pool-name]
(regexp-tools/match-based-on-pool-name
(per-pool-config)
pool-name
:user-limit-normal
:default-value Integer/MAX_VALUE))
(defn- user-limit-constrained
"Returns the user-limit-constrained for the given pool"
[pool-name]
(regexp-tools/match-based-on-pool-name
(per-pool-config)
pool-name
:user-limit-constrained
:default-value Integer/MAX_VALUE))
(defn- update-interval-seconds
"Returns the interval in seconds at which
to refresh queue lengths from the database"
[]
(:update-interval-seconds
(config/queue-limits)))
(defn get-pending-jobs
"Queries for and returns the set of
currently pending jobs from the database"
[]
(-> datomic/conn
d/db
queries/get-pending-job-ents))
(defn- jobs->queue-lengths
"Given a collection of pending jobs, returns a map with two
sub-maps of the following shape:
{:pool->queue-length {pool-a 100 pool-b 200 ...}
:pool->user->queue-length {pool-a {user-x 10 user-y 20 user-z 70}
pool-b {user-x 20 user-y 40 user-z 140}
...}"
[pending-jobs]
(let [pool->pending-jobs
(group-by
cached-queries/job->pool-name
pending-jobs)
pool->user->queue-length
(pc/map-vals
#(pc/map-vals
count
(group-by
cached-queries/job-ent->user
%))
pool->pending-jobs)]
{:pool->queue-length
(pc/map-vals
#(->> % vals (reduce +))
pool->user->queue-length)
:pool->user->queue-length
pool->user->queue-length}))
(defn query-queue-lengths
"Queries for pending jobs from the database and
returns a map with two sub-maps of the following shape:
{:pool->queue-length {pool-a 100 pool-b 200 ...}
:pool->user->queue-length {pool-a {user-x 10 user-y 20 user-z 70}
pool-b {user-x 20 user-y 40 user-z 140}
...}"
[]
(jobs->queue-lengths (get-pending-jobs)))
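;; For example (illustrative values), (query-queue-lengths) might return:
;;   {:pool->queue-length {"pool-a" 3}
;;    :pool->user->queue-length {"pool-a" {"alice" 2 "bob" 1}}}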
(let [pool->queue-length-atom (atom {})
pool->user->queue-length-atom (atom {})]
(defn user-queue-length
"Returns the queue length for the given pool name and user"
[pool-name user]
(get-in
@pool->user->queue-length-atom
[pool-name user]
0))
(defn user-queue-limit
"Returns the queue length limit for the given pool name -- if the
pool-global queue length is <= than the pool global threshold, we
use the 'normal' per-user limit, otherwise, we switch to using the
'constrained' per-user limit"
[pool-name]
(let [pool-global-length (get @pool->queue-length-atom pool-name 0)
pool-global-threshold (pool-global-threshold pool-name)]
(if (<= pool-global-length pool-global-threshold)
(user-limit-normal pool-name)
(user-limit-constrained pool-name))))
(defn inc-queue-length!
"Increments the pool-global and per-user queue lengths for
the given pool name and user by the given number of jobs"
[pool-name user number-jobs]
{:pre [(some? pool-name)]}
(let [inc-number-jobs #(-> % (or 0) (+ number-jobs))]
(swap! pool->queue-length-atom update pool-name inc-number-jobs)
(swap! pool->user->queue-length-atom update-in [pool-name user] inc-number-jobs))
{:pool->queue-length @pool->queue-length-atom
:pool->user->queue-length @pool->user->queue-length-atom})
(defn dec-queue-length!
"Decrements the pool-global and per-user queue lengths for
the given set of pending jobs that are being killed"
[killed-pending-jobs]
(let [{:keys [pool->queue-length
pool->user->queue-length]}
(jobs->queue-lengths killed-pending-jobs)
subtract-fn
(fn [a b]
(-> a (- b) (max 0)))]
(swap! pool->queue-length-atom #(merge-with subtract-fn % pool->queue-length))
(swap! pool->user->queue-length-atom #(util/deep-merge-with subtract-fn % pool->user->queue-length)))
{:pool->queue-length @pool->queue-length-atom
:pool->user->queue-length @pool->user->queue-length-atom})
(timers/deftimer
[cook-scheduler
queue-limit
update-queue-lengths!-duration])
(defn update-queue-lengths!
"Queries queue lengths from the database and updates the atoms"
[]
(timers/time!
update-queue-lengths!-duration
(log/info "Starting queue length update")
(let [{:keys [pool->queue-length
pool->user->queue-length]
:as queue-lengths}
(query-queue-lengths)]
(log/info "Queried queue length" queue-lengths)
(reset! pool->queue-length-atom
pool->queue-length)
(reset! pool->user->queue-length-atom
pool->user->queue-length)
(log/info "Done with queue length update")))))
(defn start-updating-queue-lengths
"Starts the chime to update queue lengths at the configured interval"
[]
(let [interval-seconds (update-interval-seconds)]
(log/info "Starting queue length updating at intervals of" interval-seconds "seconds")
(chime/chime-at
(util/time-seq
(time/now)
(time/seconds interval-seconds))
(fn [_] (update-queue-lengths!))
{:error-handler
(fn [ex]
(log/error ex "Failed to update queue length"))})))
|
package main
import (
"sync"
"github.com/rbee3u/golab/utils"
)
const (
concurrency = 2
repeat = 50_000_000
)
func main() {
do("slow", &SlowCounter{})
do("fast", &FastCounter{})
}
func do(name string, counter Counter) {
defer utils.LogElapsed(name)()
fnList := []func(Counter){
Counter.IncrA,
Counter.IncrB,
}
var wg sync.WaitGroup
for i := range fnList {
fn := fnList[i]
for c := 0; c < concurrency; c++ {
wg.Add(1)
go func() {
defer wg.Done()
for r := 0; r < repeat; r++ {
fn(counter)
}
}()
}
}
wg.Wait()
}
|
#ifndef __PARAMS__Process__
#define __PARAMS__Process__
class Process;
#include <cstddef>
#include <string>

#include "base/types.hh"
#include "params/SimObject.hh"
#include "params/System.hh"
struct ProcessParams
: public SimObjectParams
{
std::string errout;
std::string input;
bool kvmInSE;
uint64_t max_stack_size;
std::string output;
System * system;
bool useArchPT;
};
#endif // __PARAMS__Process__
|
<?php
/* @var $this yii\web\View */
/* @var $model common\models\Advertise */
// The Regular Expression filter
$reg_exUrl = "/(http|https|ftp|ftps)\:\/\/[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(\/\S*)?/";
// The Text you want to filter for urls
$text = $model->message;
// Check if there is a url in the text
if(preg_match($reg_exUrl, $text, $url)) {
    // make the URLs hyperlinks
$text = preg_replace($reg_exUrl, "<a href=\"{$url[0]}\" target=\"_blank\">{$url[0]}</a> ", $text);
} else {
    // if there are no URLs in the text, just return it unchanged
//echo $text;
}
$this->title = $model->title;
?>
<div class="content">
<div class="row share-item">
<div class="col-md-12">
<h1><?php echo $model->title ?></h1>
            <p>Posted at: <?php echo date('H:i A - d/m/y', $model->created_at); ?></p>
</div>
<div class="col-md-12">
<h5>
<?php echo $text ?>
</h5>
<?php if ($model->advertiseImages): ?>
<div class="row">
<?php foreach ($model->advertiseImages as $image): ?>
<div class="col-md-6" style="padding-bottom: 20px">
<?php echo \yii\helpers\Html::img($image->image,['class' => 'img-responsive']) ?>
</div>
<?php endforeach; ?>
</div>
<?php endif; ?>
</div>
</div> |
package qrterminal
import (
"io"
"github.com/mdp/rsc/qr"
)
const BLACK = "\033[40m \033[0m"
const WHITE = "\033[47m \033[0m"
// Level - the QR Code's redundancy level
const H = qr.H
const M = qr.M
const L = qr.L
//Config for generating a barcode
type Config struct {
Level qr.Level
Writer io.Writer
BlackChar string
WhiteChar string
}
// GenerateWithConfig expects a string to encode and a config
func GenerateWithConfig(text string, config Config) {
w := config.Writer
white := config.WhiteChar
black := config.BlackChar
code, _ := qr.Encode(text, config.Level)
// Frame the barcode in a 1 pixel border
w.Write([]byte(white))
for i := 0; i <= code.Size; i++ {
w.Write([]byte(white))
}
w.Write([]byte("\n"))
for i := 0; i <= code.Size; i++ {
w.Write([]byte(white))
for j := 0; j <= code.Size; j++ {
if code.Black(i, j) {
w.Write([]byte(black))
} else {
w.Write([]byte(white))
}
}
w.Write([]byte("\n"))
}
}
// Generate a QR Code and write it out to io.Writer
func Generate(text string, l qr.Level, w io.Writer) {
config := Config{
		Level:     l,
Writer: w,
BlackChar: BLACK,
WhiteChar: WHITE,
}
GenerateWithConfig(text, config)
}
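// Example usage (illustrative):
//   qrterminal.Generate("https://example.com", qrterminal.L, os.Stdout)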
|
/*
* Copyright (C) 2012 Benjamin Boksa (http://www.boksa.de/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.boksa.rt.model;
import java.lang.reflect.InvocationTargetException;
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.beanutils.BeanUtils;
import org.joda.time.DateTime;
/* Ticket fields as per http://requesttracker.wikia.com/wiki/REST#Ticket_Properties:
id:
Queue:
Owner:
Creator:
Subject:
Status:
Priority:
InitialPriority:
FinalPriority:
Requestors:
Cc:
AdminCc:
Created:
Starts:
Started:
Due:
Resolved:
Told:
LastUpdated:
TimeEstimated:
TimeWorked:
TimeLeft:
*/
public class RTTicket extends RTTicketAbstractObject implements RTCustomFieldObject {
private Long id;
private String queue;
private String owner;
private String creator;
private String subject;
private String status;
private Integer priority;
private Integer initialPriority;
private Integer finalPriority;
private String requestors;
private String cc;
private String adminCc;
private DateTime created;
private DateTime starts;
private DateTime started;
private DateTime due;
private DateTime resolved;
private DateTime told;
private DateTime lastUpdated;
private Long timeWorked;
private Long timeEstimated;
private Long timeLeft;
private Map<String,RTCustomField> customFields;
public RTTicket() {
this.customFields = new HashMap<String,RTCustomField>();
}
// getter and setter methods...
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getQueue() {
return queue;
}
public void setQueue(String queue) {
this.queue = queue;
}
public String getOwner() {
return owner;
}
public void setOwner(String owner) {
this.owner = owner;
}
public String getCreator() {
return creator;
}
public void setCreator(String creator) {
this.creator = creator;
}
public String getSubject() {
return subject;
}
public void setSubject(String subject) {
this.subject = subject;
}
public String getStatus() {
return status;
}
public void setStatus(String status) {
this.status = status;
}
public Integer getPriority() {
return priority;
}
public void setPriority(Integer priority) {
this.priority = priority;
}
public Integer getInitialPriority() {
return initialPriority;
}
public void setInitialPriority(Integer initialPriority) {
this.initialPriority = initialPriority;
}
public Integer getFinalPriority() {
return finalPriority;
}
public void setFinalPriority(Integer finalPriority) {
this.finalPriority = finalPriority;
}
public String getRequestors() {
return requestors;
}
public void setRequestors(String requestors) {
this.requestors = requestors;
}
public String getCc() {
return cc;
}
public void setCc(String cc) {
this.cc = cc;
}
public String getAdminCc() {
return adminCc;
}
public void setAdminCc(String adminCc) {
this.adminCc = adminCc;
}
public DateTime getCreated() {
return created;
}
public void setCreated(DateTime created) {
this.created = created;
}
public DateTime getStarts() {
return starts;
}
public void setStarts(DateTime starts) {
this.starts = starts;
}
public DateTime getStarted() {
return started;
}
public void setStarted(DateTime started) {
this.started = started;
}
public DateTime getDue() {
return due;
}
public void setDue(DateTime due) {
this.due = due;
}
public DateTime getResolved() {
return resolved;
}
public void setResolved(DateTime resolved) {
this.resolved = resolved;
}
public DateTime getTold() {
return told;
}
public void setTold(DateTime told) {
this.told = told;
}
public DateTime getLastUpdated() {
return lastUpdated;
}
public void setLastUpdated(DateTime lastUpdated) {
this.lastUpdated = lastUpdated;
}
public Long getTimeEstimated() {
return timeEstimated;
}
public void setTimeEstimated(Long timeEstimated) {
this.timeEstimated = timeEstimated;
}
public Long getTimeWorked() {
return timeWorked;
}
public void setTimeWorked(Long timeWorked) {
this.timeWorked = timeWorked;
}
public Long getTimeLeft() {
return timeLeft;
}
public void setTimeLeft(Long timeLeft) {
this.timeLeft = timeLeft;
}
public Map<String, RTCustomField> getCustomFields() {
return customFields;
}
public void setCustomFields(Map<String, RTCustomField> customFields) {
this.customFields = customFields;
}
// toString...
@Override
public String toString() {
return "RTTicket [id=" + id
+ ", queue=" + queue
+ ", owner=" + owner
+ ", creator=" + creator
+ ", subject=" + subject
+ ", status=" + status
+ ", priority=" + priority
+ ", initialPriority=" + initialPriority
+ ", finalPriority=" + finalPriority
+ ", requestors=" + requestors
+ ", cc=" + cc
+ ", adminc cc=" + adminCc
+ ", created=" + created
+ ", starts=" + starts
+ ", started=" + started
+ ", due=" + due
+ ", resolved=" + resolved
+ ", told=" + told
+ ", lastUpdated=" + lastUpdated
+ ", timeWorked=" + timeWorked
+ ", timeEstimated=" + timeEstimated
+ ", timeLeft=" + timeLeft
+ ", customFields=" + customFields + "]";
}
public String getNewTicketParams() {
String params = "id: ticket/new"
+ "\nrequestor: " + requestors
+ "\nsubject: " + subject
+ "\ncc: " + cc
+ "\nadmincc: " + adminCc
+ "\nowner: " + owner
+ "\nstatus: new"
+ "\npriority: " + priority
+ "\ninitialPriority: " + initialPriority
+ "\nfinalPriority: " + finalPriority
+ "\ntimeEstimated: " + timeEstimated
+ "\nstarts: " + starts
+ "\ndue: " + due
+ "\nqueue: " + queue;
for (String customFieldName : customFields.keySet()) {
params += "\n" + customFieldName + ": " + customFields.get(customFieldName).getValue();
}
return params;
}
@Override
public void populate(Map<String, String> parameters) throws InvocationTargetException, IllegalAccessException {
BeanUtils.populate(this, parameters);
}
}
|
๏ปฟ//-----------------------------------------------------------------------
// <copyright company="Nuclei">
// Copyright 2013 Nuclei. Licensed under the Apache License, Version 2.0.
// </copyright>
//-----------------------------------------------------------------------
namespace Nuclei.Communication.Protocol
{
/// <summary>
/// Defines the interface for objects that register files for uploading.
/// </summary>
public interface IStoreUploads
{
/// <summary>
/// Registers a new file path for uploading
/// and returns a new token for use with the path.
/// </summary>
/// <param name="path">The full path to the file that should be uploaded.</param>
/// <returns>
/// The token that can be used to retrieve the file path.
/// </returns>
UploadToken Register(string path);
/// <summary>
/// Reregisters a file path for uploading with a given path.
/// </summary>
/// <param name="token">The token.</param>
/// <param name="path">The full path to the file that should be uploaded.</param>
void Reregister(UploadToken token, string path);
/// <summary>
/// Deregisters the file from upload and returns the path.
/// </summary>
/// <param name="token">The token.</param>
/// <returns>The file path that was registered with the given token.</returns>
string Deregister(UploadToken token);
/// <summary>
/// Determines if a path is stored for the given token.
/// </summary>
/// <param name="token">The token.</param>
/// <returns>
/// <see langword="true" /> if a path is stored for the given token;
/// otherwise, <see langword="false" />.
/// </returns>
bool HasRegistration(UploadToken token);
}
}
|
import type { QueryInterface } from 'sequelize';
import { DataTypes } from 'sequelize';
export async function up({ context: queryInterface }: Record<string, QueryInterface>): Promise<void> {
await queryInterface.createTable('BlogPost', {
id: {
type: DataTypes.UUID,
defaultValue: DataTypes.UUIDV4,
allowNull: false,
primaryKey: true,
},
title: {
allowNull: false,
type: DataTypes.STRING,
unique: true,
},
content: {
allowNull: true,
type: DataTypes.STRING,
},
createdAt: {
allowNull: false,
type: DataTypes.DATE
},
updatedAt: {
allowNull: false,
type: DataTypes.DATE
},
});
}
export async function down({ context: queryInterface }: Record<string, QueryInterface>): Promise<void> {
await queryInterface.dropTable('BlogPost');
}
|
import React from 'react';
interface Props {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
onClick: (...args: any[]) => void;
}
const SettingsMenuItem: React.FC<Props> = ({ onClick, children }) => {
return <li onClick={onClick}>{children}</li>;
};
export default SettingsMenuItem;
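// Example usage (illustrative):
//   <SettingsMenuItem onClick={() => console.log('clicked')}>Log out</SettingsMenuItem>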
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.intel.hpnl.core;
import java.nio.ByteBuffer;
import java.util.concurrent.LinkedBlockingQueue;
public class RdmConnection {
public RdmConnection(long nativeHandle, RdmService rdmService) {
this.nativeHandle = nativeHandle;
this.rdmService = rdmService;
this.sendBufferList = new LinkedBlockingQueue<HpnlBuffer>();
this.localNameLength = get_local_name_length(this.nativeHandle);
this.localName = ByteBuffer.allocateDirect(localNameLength);
get_local_name(this.localName, this.nativeHandle);
this.localName.limit(localNameLength);
init(this.nativeHandle);
}
public RdmHandler getRecvCallback() {
return recvCallback;
}
public void setRecvCallback(RdmHandler callback) {
this.recvCallback = callback;
}
public RdmHandler getSendCallback() {
return sendCallback;
}
public void setSendCallback(RdmHandler callback) {
sendCallback = callback;
}
public void handleCallback(int eventType, int bufferId, int blockBufferSize) {
Exception e = null;
if (eventType == EventType.RECV_EVENT) {
e = executeCallback(recvCallback, bufferId, blockBufferSize);
} else if (eventType == EventType.SEND_EVENT) {
e = executeCallback(sendCallback, bufferId, blockBufferSize);
pushSendBuffer(rdmService.getSendBuffer(bufferId));
    } else {
      // ignore other event types
    }
if(e != null){
e.printStackTrace();
}
}
public void pushSendBuffer(HpnlBuffer buffer) {
try {
sendBufferList.put(buffer);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
private HpnlBuffer takeSendBuffer() {
HpnlBuffer buf = sendBufferList.poll();
return buf;
}
public HpnlBuffer getRecvBuffer(int bufferId) {
return this.rdmService.getRecvBuffer(bufferId);
}
private Exception executeCallback(RdmHandler handler, int bufferId, int blockBufferSize){
if (handler == null) {
return null;
}
try{
handler.handle(this, bufferId, blockBufferSize);
}catch(Exception e){
return e;
}
return null;
}
public void send(ByteBuffer buffer, byte b, long seq) {
HpnlBuffer hpnlBuffer = takeSendBuffer();
hpnlBuffer.put(buffer, localNameLength, localName, b, seq);
send(hpnlBuffer.size(), hpnlBuffer.getBufferId(), this.nativeHandle);
}
public void sendTo(ByteBuffer buffer, byte b, long seq, ByteBuffer peerName) {
HpnlBuffer hpnlBuffer = takeSendBuffer();
hpnlBuffer.put(buffer, localNameLength, localName, b, seq);
sendTo(hpnlBuffer.size(), hpnlBuffer.getBufferId(), peerName, this.nativeHandle);
}
private native void init(long nativeHandle);
private native void get_local_name(ByteBuffer localName, long nativeHandle);
private native int get_local_name_length(long nativeHandle);
private native int send(int blockBufferSize, int bufferId, long nativeHandle);
private native int sendTo(int blockBufferSize, int bufferId, ByteBuffer peerName, long nativeHandle);
// 1 byte -> 0(connection) or 1(nonconnection)
// 4 bytes -> address length
// N bytes -> address
// 1 byte -> type
// 8 bytes -> seq id
// N bytes -> raw message
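  // A minimal sketch (illustrative assumption, not part of this API) of how a
  // caller might lay out such a buffer before invoking sendBuf:
  //
  //   ByteBuffer buf = ByteBuffer.allocateDirect(14 + nameLen + msgLen);
  //   buf.put((byte) 1);           // 1 = nonconnection
  //   buf.putInt(nameLen);         // address length
  //   buf.put(localNameBytes);     // address
  //   buf.put(msgType);            // type
  //   buf.putLong(seqId);          // seq id
  //   buf.put(rawMessage);         // raw message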
public native int sendBuf(ByteBuffer buffer, int bufferSize, long nativeHandle);
public native int sendBufTo(ByteBuffer buffer, int bufferSize, ByteBuffer peerName, long nativeHandle);
RdmService rdmService;
ByteBuffer localName;
int localNameLength;
private LinkedBlockingQueue<HpnlBuffer> sendBufferList;
private RdmHandler recvCallback = null;
private RdmHandler sendCallback = null;
private long nativeHandle;
}
|
#!/bin/sh
DOCKER_ARG="$@"
export DOCKER_ARG
exec /bin/s6-svscan /service
|
const jwt = require('jsonwebtoken');
const KEY_ID = process.env.KEY_ID;
const SECRET = process.env.SECRET;
/** @function
 * @name signJwt
 * Uses the SECRET and KEY_ID to create a JWT for the Smooch instantiation.
 * The first set of characters, up to the first period (the JWT header), is the
 * same for every token; the remaining characters differ per method call. */
function signJwt(userId){
const result = jwt.sign({
scope: 'appUser',
userId: userId
},
SECRET, {
header: {
alg: 'HS256',
typ: 'JWT',
kid: KEY_ID
}
});
return result;
}
module.exports = signJwt; |
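// Example usage (illustrative; requires KEY_ID and SECRET in the environment):
//   const signJwt = require('./signJwt');
//   const token = signJwt('some-user-id');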
<?php
/*
* Plugin Name: ra7form
* Author: rafaantonio
* Text Domain: ra7form
* Domain Path: /languages
*/
class Ra7form_Plugin {
static $instance;
public $inpage;
public $recipients_obj;
public function __construct() {
$act = filter_input( INPUT_GET, 'action' );
$this->inpage = ( $act ) ? 0 : 1;
add_action( 'admin_menu', [ $this, 'plugin_menu' ] );
}
public function plugin_menu() {
if ( $this->inpage ) {
$inner = [ $this, 'plugin_settings_page' ];
} else {
$inner = [ $this, 'plugin_editor_page' ];
}
$hook = add_menu_page(
__( 'Recipients for Contact Form 7', 'ra7form' ),
__( 'Recipients', 'ra7form' ),
'manage_options',
'ra7form',
$inner,
'dashicons-email-alt2',
58
);
add_action( "load-$hook", [ $this, 'screen_option' ] );
}
public function plugin_settings_page() {
include_once( plugin_dir_path( __FILE__ ) . '/ra7form-list-template.php' );
}
public function plugin_editor_page() {
$rec = filter_input( INPUT_GET, 'post' );
$this->recipients_obj = new ra7form_Recipient( $rec );
$this->recipients_obj->prepare_item();
$this->recipients_obj->display();
}
public function screen_option() {
if ( $this->inpage ) {
$option = 'per_page';
$args = [
'label' => __( 'Recipients', 'ra7form' ),
'default' => 5,
'option' => 'recipients_per_page'
];
add_screen_option( $option, $args );
$this->recipients_obj = new Ra7form_List();
}
}
public static function get_instance() {
if ( !isset( self::$instance ) ) { self::$instance = new self(); }
return self::$instance;
}
}
|
package coadynamic
import (
"context"
"fbc/cwf/radius/modules"
"fbc/lib/go/radius"
"fbc/lib/go/radius/rfc2866"
"fmt"
"net"
"sync/atomic"
"testing"
"go.uber.org/zap"
"github.com/stretchr/testify/require"
)
func TestCoaDynamic(t *testing.T) {
// Arrange
secret := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
logger, _ := zap.NewDevelopment()
ctx, err := Init(logger, modules.ModuleConfig{
"port": 4799,
})
require.Nil(t, err)
// Spawn a mock radius server to return response for the coa request
var radiusResponseCounter uint32
radiusServer := radius.PacketServer{
Handler: radius.HandlerFunc(
func(w radius.ResponseWriter, r *radius.Request) {
atomic.AddUint32(&radiusResponseCounter, 1)
resp := r.Response(radius.CodeDisconnectACK)
w.Write(resp)
},
),
SecretSource: radius.StaticSecretSource(secret),
Addr: fmt.Sprintf(":%d", 4799),
Ready: make(chan bool, 1),
}
fmt.Print("Starting server... ")
go func() {
_ = radiusServer.ListenAndServe()
}()
defer radiusServer.Shutdown(context.Background())
listenSuccess := <-radiusServer.Ready // Wait for server to get ready
if !listenSuccess {
require.Fail(t, "radiusServer start error")
return
}
fmt.Println("Server listenning")
// Act
	// Sending a coa request - handled by the module, so the next handler should not be called
generateRequest(ctx, radius.CodeDisconnectRequest, t, "session1", false)
require.Equal(t, uint32(1), atomic.LoadUint32(&radiusResponseCounter))
// Sending a non coa request
generateRequest(ctx, radius.CodeAccountingRequest, t, "session2")
require.Equal(t, uint32(1), atomic.LoadUint32(&radiusResponseCounter))
// Sending a coa request
res, err := generateRequest(ctx, radius.CodeDisconnectRequest, t, "session3", false)
require.Equal(t, uint32(2), atomic.LoadUint32(&radiusResponseCounter))
// Assert
require.Nil(t, err)
require.NotNil(t, res)
require.Equal(t, res.Code, radius.CodeDisconnectACK)
}
func generateRequest(ctx modules.Context, code radius.Code, t *testing.T, sessionID string, next ...bool) (*modules.Response, error) {
logger, _ := zap.NewDevelopment()
nextCalled := false
// Update tracker with some target endpoint
tracker := GetRadiusTracker()
tracker.Set(&radius.Request{
Packet: &radius.Packet{
Attributes: radius.Attributes{
rfc2866.AcctSessionID_Type: []radius.Attribute{radius.Attribute(sessionID)},
},
},
RemoteAddr: IPAddr{"127.0.0.1:1313"},
})
// Handle
res, err := Handle(
ctx,
&modules.RequestContext{
RequestID: 0,
Logger: logger,
SessionStorage: nil,
},
createRadiusRequest(code, sessionID),
func(c *modules.RequestContext, r *radius.Request) (*modules.Response, error) {
nextCalled = true
return nil, nil
},
)
// Verify
nextCalledExpected := true
if len(next) > 0 {
nextCalledExpected = next[0]
}
require.Equal(t, nextCalledExpected, nextCalled)
return res, err
}
func createRadiusRequest(code radius.Code, sessionID string) *radius.Request {
packet := radius.New(code, []byte{0x01, 0x02, 0x03, 0x4, 0x05, 0x06})
packet.Attributes[rfc2866.AcctSessionID_Type] = []radius.Attribute{radius.Attribute(sessionID)}
req := &radius.Request{}
req.RemoteAddr = &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 4799}
req = req.WithContext(context.Background())
req.Packet = packet
return req
}
// IPAddr
type IPAddr struct{ IP string }
func (a IPAddr) Network() string { return "ip" }
func (a IPAddr) String() string { return a.IP }
|
๏ปฟFunction Set-VTVMSwichAndState {
[CmdletBinding(PositionalBinding = $false)]
[OutputType([PSCustomObject])]
Param (
$ComputerName,
$vLabHostName,
$vLabName,
$vLabvLan
)
$ComputerNamevLab = "vLab-$ComputerName"
$ProcessName = $MyInvocation.MyCommand.Name
$Log = $Script:VTConfig.LogFile
$LogError = $Script:VTConfig.LogFileError
Try {
If (-not $vLabvLan) {
$Script:VMsVLans = Get-VTVMvLan
$VMvLan = (($Script:VMsVLans | Where-Object -FilterScript {
$_.Name -eq $ComputerName
}).vLan | Where-Object -FilterScript {
$PSItem -ne '0'
} | Select-Object -Unique)[0]
        } Else {
            $VMvLan = $vLabvLan
        }
$SwitchName = 'Lab Isolated Network ({0}-{1})' -f $vLabName, $VMvLan
$Msg = "SwitchName: '$SwitchName', ComputerName: '$ComputerName'"
"[$ProcessName] [$((Get-Date).ToString())] [Info] $Msg" | Out-File -FilePath $Log -Append
Write-Verbose -Message $(Get-Info 'i' $Msg)
Invoke-Command -ComputerName $vLabHostName -ScriptBlock {
$VMNetworkAdapter = Get-VMSwitch | Where-Object -FilterScript {
$_.Name -eq $Using:SwitchName
}
If ($VMNetworkAdapter) {
$Msg = "VM switch exist: $($VMNetworkAdapter.Name)"
Write-Verbose -Message $Msg
Write-Verbose -Message "$($VMNetworkAdapter.Name)"
Connect-VMNetworkAdapter -VMName $Using:ComputerNamevLab -SwitchName $VMNetworkAdapter.Name
} Else {
$Msg = "VM switch does't exist: $($VMNetworkAdapter.Name)"
Write-Verbose -Message $Msg
}
If ((Get-VM $Using:ComputerNamevLab).State -ne 'Running') {
Start-VM -VMName $Using:ComputerNamevLab
}
}
} Catch {
Get-Date | Out-File -FilePath $LogError -Append
$_ | Out-File -FilePath $LogError -Append
Throw $_
}
} |
library search;
export 'data/search_cache.dart';
export 'data/search_client.dart';
export 'data/search_service.dart';
export 'models/license.dart';
export 'models/owner.dart';
export 'models/repo.dart';
export 'models/search_results.dart';
export 'models/search_results_error.dart';
export 'ui/search_presenter.dart';
export 'ui/search_state.dart';
|
'use strict'
var common = require('./common')
var tap = require('tap')
var helper = require('../../lib/agent_helper')
var METRIC_HOST_NAME = null
var METRIC_HOST_PORT = null
exports.MONGO_SEGMENT_RE = common.MONGO_SEGMENT_RE
exports.TRANSACTION_NAME = common.TRANSACTION_NAME
exports.DB_NAME = common.DB_NAME
exports.connect = common.connect
exports.close = common.close
exports.populate = populate
exports.test = collectionTest
function collectionTest(name, run) {
var collections = ['testCollection', 'testCollection2']
tap.test(name, {timeout: 10000}, function(t) {
var agent = null
var client = null
var db = null
var collection = null
t.autoend()
t.test('remote connection', function(t) {
t.autoend()
t.beforeEach(function(done) {
agent = helper.instrumentMockedAgent()
helper.bootstrapMongoDB(collections, function(err) {
if (err) {
return done(err)
}
var mongodb = require('mongodb')
METRIC_HOST_NAME = common.getHostName(agent)
METRIC_HOST_PORT = common.getPort()
common.connect(mongodb, null, function(err, res) {
if (err) {
return done(err)
}
client = res.client
db = res.db
collection = db.collection('testCollection')
populate(db, collection, done)
})
})
})
t.afterEach(function(done) {
common.close(client, db, function(err) {
helper.unloadAgent(agent)
agent = null
done(err)
})
})
t.test('should not error outside of a transaction', function(t) {
t.notOk(agent.getTransaction(), 'should not be in a transaction')
run(t, collection, function(err) {
t.error(err, 'running test should not error')
t.notOk(agent.getTransaction(), 'should not somehow gain a transaction')
t.end()
})
})
t.test('should generate the correct metrics and segments', function(t) {
helper.runInTransaction(agent, function(transaction) {
transaction.name = common.TRANSACTION_NAME
run(t, collection, function(err, segments, metrics) {
if (
!t.error(err, 'running test should not error') ||
!t.ok(agent.getTransaction(), 'should maintain tx state')
) {
return t.end()
}
t.equal(
agent.getTransaction().id, transaction.id,
'should not change transactions'
)
var segment = agent.tracer.getSegment()
var current = transaction.trace.root
for (var i = 0, l = segments.length; i < l; ++i) {
t.equal(current.children.length, 1, 'should have one child')
current = current.children[0]
t.equal(current.name, segments[i], 'child should be named ' + segments[i])
if (common.MONGO_SEGMENT_RE.test(current.name)) {
checkSegmentParams(t, current)
}
}
t.equal(current.children.length, 0, 'should have no more children')
t.ok(current === segment, 'should test to the current segment')
transaction.end()
common.checkMetrics(
t,
agent,
METRIC_HOST_NAME,
METRIC_HOST_PORT,
metrics || []
)
t.end()
})
})
})
t.test('should respect `datastore_tracer.instance_reporting`', function(t) {
agent.config.datastore_tracer.instance_reporting.enabled = false
helper.runInTransaction(agent, function(tx) {
run(t, collection, function(err) {
if (!t.error(err, 'running test should not error')) {
return t.end()
}
var current = tx.trace.root
while (current) {
if (common.MONGO_SEGMENT_RE.test(current.name)) {
t.comment('Checking segment ' + current.name)
const attributes = current.getAttributes()
t.notOk(
attributes.host,
'should not have host attribute'
)
t.notOk(
attributes.port_path_or_id,
'should not have port attribute'
)
t.ok(
attributes.database_name,
'should have database name attribute'
)
}
current = current.children[0]
}
t.end()
})
})
})
t.test('should respect `datastore_tracer.database_name_reporting`', function(t) {
agent.config.datastore_tracer.database_name_reporting.enabled = false
helper.runInTransaction(agent, function(tx) {
run(t, collection, function(err) {
if (!t.error(err, 'running test should not error')) {
return t.end()
}
var current = tx.trace.root
while (current) {
if (common.MONGO_SEGMENT_RE.test(current.name)) {
t.comment('Checking segment ' + current.name)
const attributes = current.getAttributes()
t.ok(
attributes.host,
'should have host attribute'
)
t.ok(
attributes.port_path_or_id,
'should have port attribute'
)
t.notOk(
attributes.database_name,
'should not have database name attribute'
)
}
current = current.children[0]
}
t.end()
})
})
})
})
// The domain socket tests should only be run if there is a domain socket
// to connect to, which only happens if there is a Mongo instance running on
// the same box as these tests. This should always be the case on Travis,
    // but to be safe we also check for the Travis environment flag.
var domainPath = common.getDomainSocketPath()
var shouldTestDomain = domainPath || process.env.TRAVIS
t.test('domain socket', {skip: !shouldTestDomain}, function(t) {
t.autoend()
t.beforeEach(function(done) {
agent = helper.instrumentMockedAgent()
METRIC_HOST_NAME = agent.config.getHostnameSafe()
METRIC_HOST_PORT = domainPath
helper.bootstrapMongoDB(collections, function(err) {
if (err) {
return done(err)
}
var mongodb = require('mongodb')
common.connect(mongodb, domainPath, function(err, res) {
if (err) {
return done(err)
}
client = res.client
db = res.db
collection = db.collection('testCollection')
populate(db, collection, done)
})
})
})
t.afterEach(function(done) {
common.close(client, db, function(err) {
helper.unloadAgent(agent)
agent = null
done(err)
})
})
t.test('should have domain socket in metrics', function(t) {
t.notOk(agent.getTransaction(), 'should not have transaction')
helper.runInTransaction(agent, function(transaction) {
transaction.name = common.TRANSACTION_NAME
run(t, collection, function(err, segments, metrics) {
t.error(err)
transaction.end()
var re = new RegExp('^Datastore/instance/MongoDB/' + domainPath)
var badMetrics = Object.keys(agent.metrics.unscoped).filter(function(m) {
return re.test(m)
})
t.notOk(badMetrics.length, 'should not use domain path as host name')
common.checkMetrics(
t,
agent,
METRIC_HOST_NAME,
METRIC_HOST_PORT,
metrics || []
)
t.end()
})
})
})
})
})
}
function checkSegmentParams(t, segment) {
var dbName = common.DB_NAME
if (/\/rename$/.test(segment.name)) {
dbName = 'admin'
}
var attributes = segment.getAttributes()
t.equal(attributes.database_name, dbName, 'should have correct db name')
t.equal(attributes.host, METRIC_HOST_NAME, 'should have correct host name')
t.equal(attributes.port_path_or_id, METRIC_HOST_PORT, 'should have correct port')
}
function populate(db, collection, done) {
var items = []
for (var i = 0; i < 30; ++i) {
items.push({
i: i,
next3: [i + 1, i + 2, i + 3],
data: Math.random().toString(36).slice(2),
mod10: i % 10,
// spiral out
loc: [
(i % 4 && (i + 1) % 4 ? i : -i),
((i + 1) % 4 && (i + 2) % 4 ? i : -i)
]
})
}
db.collection('testCollection2').drop(function() {
collection.deleteMany({}, function(err) {
if (err) return done(err)
collection.insert(items, done)
})
})
}
|
import React from 'react';
import { Button, Input } from 'antd';
import { StyledForm } from './styles';
export interface SearchProps {
onSearch(args: string): void;
placeholder?: string;
loading?: boolean;
}
export const Search: React.FunctionComponent<SearchProps> = ({
onSearch,
placeholder = 'type a repository name',
loading = false,
}: SearchProps) => {
const [keyword, setKeyword] = React.useState('');
const [disableSearch, setDisabledSearch] = React.useState(true);
const minInputLen = 3;
const handleOnChange = ({
target: { value },
}: React.ChangeEvent<HTMLInputElement>): void => {
setKeyword(value);
setDisabledSearch(value.length < minInputLen);
};
  const handleSearch = (event: React.SyntheticEvent): void => {
event.preventDefault();
if (keyword !== '' && !disableSearch) {
onSearch(keyword);
}
};
return (
<StyledForm onSubmit={handleSearch}>
<Input
aria-label="input"
type="search"
value={keyword}
onChange={handleOnChange}
placeholder={placeholder}
data-testid="input-search"
size="small"
/>
<Button
type="primary"
data-testid="submit-search"
disabled={disableSearch || loading}
onClick={handleSearch}
size="small"
loading={loading}
>
Search
</Button>
</StyledForm>
);
};
|
---
uid: web-forms/overview/ajax-control-toolkit/dropshadow/index
title: "DropShadow | Microsoft Docs"
author: rick-anderson
description: "This tutorial shows how to use the DropShadow control, which draws a drop shadow behind a panel."
ms.author: aspnetcontent
ms.date: 11/14/2011
ms.assetid: ccd48877-ed83-43fe-9b3b-ed8855e58833
msc.legacyurl: /web-forms/overview/ajax-control-toolkit/dropshadow
msc.type: chapter
---
DropShadow
====================
> This tutorial shows how to use the DropShadow control, which draws a drop shadow behind a panel.
- [Adjusting the Z-Index of a DropShadow (C#)](adjusting-the-z-index-of-a-dropshadow-cs.md)
- [Manipulating DropShadow Properties from Client Code (C#)](manipulating-dropshadow-properties-from-client-code-cs.md)
- [Adjusting the Z-Index of a DropShadow (VB)](adjusting-the-z-index-of-a-dropshadow-vb.md)
- [Manipulating DropShadow Properties from Client Code (VB)](manipulating-dropshadow-properties-from-client-code-vb.md)
|
#!/usr/bin/env bash
RSTAR_DEPS_BIN+=(
awk
gcc
make
perl
)
RSTAR_DEPS_PERL+=(
ExtUtils::Command
Pod::Usage
)
action() {
local LC_ALL
local OPTIND
local duration
local init
local prefix_absolute
while getopts ":b:p:" opt
do
case "$opt" in
b) RSTAR_BACKEND=$OPTARG ;;
p) RSTAR_PREFIX=$OPTARG ;;
*) emerg "Invalid option specified: $opt" ;;
esac
done
shift $(( OPTIND - 1 ))
# Throw OS-specific warnings, if any
case ${RSTAR_PLATFORM["key"]} in
openbsd)
# Check for userlimits
if [[ -z "$(userinfo "$(whoami)" | awk '$1 == "class" { print $2 }')" ]]
then
warn "Your user does not have a class, this may limit the installer's memory"
warn "usage, which can result in failure to compile."
fi
;;
esac
# Prepare environment for a reproducible install
case ${RSTAR_PLATFORM["key"]} in
dragonfly) LC_ALL=C ;;
linux-arch_linux) LC_ALL=en_US.UTF-8 ;;
*) LC_ALL=C.UTF-8 ;;
esac
# Distribution tarballs come with an epoch set, use it if you find it.
if [[ -f "$BASEDIR/etc/epoch.txt" ]]
then
SOURCE_DATE_EPOCH="$(head -n1 "$BASEDIR/etc/epoch.txt")"
debug "SOURCE_DATE_EPOCH set to $SOURCE_DATE_EPOCH (epoch.txt)"
fi
export LC_ALL
export SOURCE_DATE_EPOCH
# If no specific targets are specified, set all targets
if (( $# < 1 ))
then
set -- core modules
fi
# Take note of the current time, so we can show how long it took later
# on
init="$(date +%s)"
# Create the installation directory
mkdir -p -- "$RSTAR_PREFIX"
# Use an absolute path when reporting about the installation path
prefix_absolute="$(CDPATH="" cd -- "$RSTAR_PREFIX" 2> /dev/null && pwd -P)"
info "Installing Raku in $prefix_absolute"
# Run each installation target
for target in "$@"
do
if [[ $(type -t "action_install_$target") != "function" ]]
then
crit "Installation target '$target' is invalid"
continue
fi
"action_install_$target"
done
duration="$(pp_duration "$init")"
# Friendly message
info "Rakudo Star has been installed into $prefix_absolute!"
info "The installation took $duration."
info ""
info "You may need to add the following paths to your \$PATH:"
info " $prefix_absolute/bin"
info " $prefix_absolute/share/perl6/site/bin"
info " $prefix_absolute/share/perl6/vendor/bin"
info " $prefix_absolute/share/perl6/core/bin"
}
action_install_core() {
local args
args+=("--prefix=$RSTAR_PREFIX")
# Build relocatable components when not on OpenBSD.
if [[ ${RSTAR_PLATFORM[os]} != "openbsd" ]]
then
args+=("--relocatable")
fi
# Compile all core components
for component in moarvm nqp rakudo
do
VERSION="$(config_etc_kv "fetch_core.txt" "${component}_version")" \
build_"$component" "${args[@]}" && continue
die "Build failed!"
done
}
action_install_modules() {
local failed_modules
local modules
notice "Starting installation of bundled modules"
modules="$(tmpfile)"
awk '/^[^#]/ {print $1}' "$BASEDIR/etc/modules.txt" > "$modules"
while read -r module
do
info "Installing $module"
install_raku_module "$BASEDIR/src/rakudo-star-modules/$module" \
&& continue
failed_modules+=("$module")
done < "$modules"
# Show a list of all modules that failed to install
if [[ ${failed_modules[*]} ]]
then
crit "The following modules failed to install:"
for module in "${failed_modules[@]}"
do
crit " $module"
done
fi
}
build_moarvm() {
local logfile="/dev/stdout"
info "Starting build on MoarVM"
build_prepare "$BASEDIR/src/moarvm-$VERSION/MoarVM-$VERSION" || return
if [[ -z "$RSTAR_DEBUG" ]]
then
logfile="$(tmpfile)"
notice "Build log available at $logfile"
fi
	{
		perl Configure.pl "$@" \
			&& ${RSTAR_PLATFORM[make]} \
			&& ${RSTAR_PLATFORM[make]} install \
			|| return
	} > "$logfile" 2>&1
}
build_nqp() {
local logfile="/dev/stdout"
info "Starting build on NQP"
build_prepare "$BASEDIR/src/nqp-$VERSION/nqp-$VERSION" || return
if [[ -z "$RSTAR_DEBUG" ]]
then
logfile="$(tmpfile)"
notice "Build log available at $logfile"
fi
{
perl Configure.pl --backend="$RSTAR_BACKEND" "$@" \
&& ${RSTAR_PLATFORM[make]} \
&& ${RSTAR_PLATFORM[make]} install \
|| return
} > "$logfile" 2>&1
}
build_rakudo() {
local logfile="/dev/stdout"
info "Starting build on Rakudo"
build_prepare "$BASEDIR/src/rakudo-$VERSION/rakudo-$VERSION" || return
if [[ -z "$RSTAR_DEBUG" ]]
then
logfile="$(tmpfile)"
notice "Build log available at $logfile"
fi
{
perl Configure.pl --backend="$RSTAR_BACKEND" "$@" \
&& ${RSTAR_PLATFORM[make]} \
&& ${RSTAR_PLATFORM[make]} install \
|| return
} > "$logfile" 2>&1
}
build_prepare() {
local source="$1"
local destination
destination="$(tmpdir)"
notice "Using $destination as working directory"
cp -R -- "$source/." "$destination" \
&& cd -- "$destination" \
|| return
}
install_raku_module() {
"$RSTAR_PREFIX/bin/raku" "$BASEDIR/lib/install-module.raku" "$1"
}
|
signup:
image: gokul711/flask-singup:v_BUILD_NUMBER
ports:
- "80:5000"
environment:
- APP_CONFIG=application.config
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from dipy.io.stateful_tractogram import Space, StatefulTractogram
from dipy.io.streamline import save_tractogram
import numpy as np
from scilpy.io.streamlines import load_tractogram_with_reference
from scilpy.io.utils import (assert_inputs_exist,
assert_outputs_exist,
add_overwrite_arg,
add_reference)
def _build_arg_parser():
p = argparse.ArgumentParser(
description='Assign an hexadecimal RGB color to a Trackvis TRK '
'tractogram. The hexadecimal RGB color should be '
'formatted as 0xRRGGBB or "#RRGGBB"',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('in_tractogram',
help='Tractogram.')
p.add_argument('out_tractogram',
help='Colored TRK tractogram.')
p.add_argument('color',
help='Can be either hexadecimal (ie. "#RRGGBB" '
'or 0xRRGGBB).')
add_reference(p)
add_overwrite_arg(p)
return p
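# Example invocation (illustrative; the script filename is hypothetical):
#   python scil_assign_color_to_tractogram.py bundle.trk bundle_red.trk "#FF0000"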
def main():
parser = _build_arg_parser()
args = parser.parse_args()
assert_inputs_exist(parser, args.in_tractogram)
assert_outputs_exist(parser, args, args.out_tractogram)
if not args.out_tractogram.endswith('.trk'):
parser.error('Output file needs to end with .trk.')
if len(args.color) == 7:
args.color = '0x' + args.color.lstrip('#')
if len(args.color) == 8:
color_int = int(args.color, 0)
red = color_int >> 16
green = (color_int & 0x00FF00) >> 8
blue = color_int & 0x0000FF
else:
parser.error('Hexadecimal RGB color should be formatted as "#RRGGBB"'
' or 0xRRGGBB.')
sft = load_tractogram_with_reference(parser, args, args.in_tractogram)
sft.data_per_point["color"] = [np.tile([red, green, blue],
(len(i), 1)) for i in sft.streamlines]
sft = StatefulTractogram(sft.streamlines, sft, Space.RASMM,
data_per_point=sft.data_per_point)
save_tractogram(sft, args.out_tractogram)
if __name__ == '__main__':
main()
|
class BinTree(object):
"""Represent a tree object"""
def __init__(self, data, tree_left = None, tree_right = None):
self.__tree_left = tree_left
self.__tree_right = tree_right
self.__data = data
@property
def data(self):
return self.__data
@data.setter
def data(self, value):
self.__data = value
@property
def left(self):
return self.__tree_left
@left.setter
def left(self, value):
self.__tree_left = value
@property
def right(self):
return self.__tree_right
@right.setter
def right(self, value):
self.__tree_right = value
@property
def is_leaf(self):
return self.left == None and self.right == None
@staticmethod
def add_tree(tree, value):
"""
        Add a subtree by its value to the correct place in the tree.
        In case a value that already exists is given,
        it will be added to the right side of the tree.
"""
if tree == None:
return BinTree(value)
if tree.data > value:
tree.left = BinTree.add_tree(tree.left, value)
else:
tree.right = BinTree.add_tree(tree.right, value)
return tree
@staticmethod
def inorder(tree):
"""
        Travel the tree in an inorder way
"""
if tree == None:
return
BinTree.inorder(tree.left)
print("%s" % tree.data)
BinTree.inorder(tree.right)
@staticmethod
def preorder(tree):
"""
Travel the tree in a preorder way
"""
if tree == None:
return
print("%s" % tree.data)
BinTree.preorder(tree.left)
BinTree.preorder(tree.right)
@staticmethod
def postorder(tree):
"""
Travel the tree in a postorder way
"""
if tree == None:
return
BinTree.postorder(tree.left)
BinTree.postorder(tree.right)
print("%s" % tree.data)
@staticmethod
def breadthfirst(tree):
"""
Traverse the tree in a breadth first method
"""
que = []
que.insert(0, tree)
while(que):
item = que.pop()
print("%s" % item.data)
if item.left != None:
que.insert(0, item.left)
if item.right != None:
que.insert(0, item.right)
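# Example usage (illustrative):
#   root = BinTree(8)
#   for v in (3, 10, 1, 6):
#       BinTree.add_tree(root, v)
#   BinTree.inorder(root)  # prints 1, 3, 6, 8, 10, one value per line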
|
// 589. Connecting Graph
// Problem description: http://www.lintcode.com/problem/connecting-graph/
// Problem solution: https://www.jiuzhang.com/solutions/connecting-graph
/*
* Union Find, Design
*
*/
class ConnectingGraph {
public:
/*
* @param n: An integer
     */
    ConnectingGraph(int n) {
// do intialization if necessary
father.resize(n+1);
for(int i = 0; i <= n; i++) {
father[i] = i;
}
}
/*
* @param a: An integer
* @param b: An integer
* @return: nothing
*/
void connect(int a, int b) {
// write your code here
int x = find(a), y = find(b);
if(x != y) {
            father[x] = y;
}
}
/*
* @param a: An integer
* @param b: An integer
* @return: A boolean
*/
bool query(int a, int b) {
// write your code here
return find(a) == find(b);
}
private:
vector<int> father;
int find(int x) {
if(father[x] == x) return x;
return father[x] = find(father[x]);
}
};
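// Example (illustrative):
//   ConnectingGraph g(5);
//   g.connect(1, 2);
//   g.query(1, 2); // true
//   g.query(1, 3); // false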
// Conclusion:
// Union Find Concept.
|
"""
shift_fields!(layer, offset)
Adds `offset` to the `layer` fields.
"""
function shift_fields! end
function shift_fields!(
layer::Union{Binary,Spin,Potts,Gaussian,ReLU,pReLU,xReLU}, offset::AbstractArray
)
@assert size(layer) == size(offset)
layer.ฮธ .+= offset
return layer
end
function shift_fields!(layer::dReLU, offset::AbstractArray)
@assert size(layer) == size(offset)
layer.ฮธp .+= offset
layer.ฮธn .+= offset
return layer
end
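# Example (illustrative; assumes a layer type whose `ฮธ` field matches the offset size):
#   layer = Binary(; ฮธ = zeros(5))
#   shift_fields!(layer, ones(5))   # layer.ฮธ is now all ones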
|
const wxConfig = require('../config/wxConfig')
const superagent = require('superagent')
const qs = require('querystring');
const WXBizDataCrypt = require('../util/WXBizDataCrypt') //่งฃๅฏๆไปถ
class userController {
/**
* ่ทๅๅฐ็จๅบunionId
* @param data
* @returns {Promise<*>}
*/
static async wxLogin(ctx) {
let req = ctx.request.body;
let js_code = req.code
let params = wxConfig;
params.js_code = js_code;
params = qs.stringify(params)
let url = 'https://api.weixin.qq.com/sns/jscode2session?' + params;
await superagent.get(url)
.then(res => {
let result = JSON.parse(res.text)
let sessionKey = result.session_key
let pc = new WXBizDataCrypt(wxConfig.appid, sessionKey)
let data = pc.decryptData(req.encryptedData, req.iv);//่งฃๅฏๅพๅฐๅฐ็จๅบuid็ญไฟกๆฏ
ctx.body = {
code: 200,
msg: '็ปๅฝๆๅ',
data
}
})
.catch(res => {
console.log(res)
})
}
}
module.exports = userController |
/*
*
* Copyright 2014 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package transport
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"math"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
var (
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
// the stream's state.
ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
// than the limit set by peer.
ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
)
// serverConnectionCounter counts the number of connections a server has seen
// (equal to the number of http2Servers created). Must be accessed atomically.
var serverConnectionCounter uint64
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
ctx context.Context
done chan struct{}
conn net.Conn
loopy *loopyWriter
readerDone chan struct{} // sync point to enable testing.
writerDone chan struct{} // sync point to enable testing.
remoteAddr net.Addr
localAddr net.Addr
maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection
inTapHandle tap.ServerInHandle
framer *framer
// The max number of concurrent streams.
maxStreams uint32
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
controlBuf *controlBuffer
fc *trInFlow
stats stats.Handler
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
// Keepalive enforcement policy.
kep keepalive.EnforcementPolicy
// The time instance last ping was received.
lastPingAt time.Time
// Number of times the client has violated keepalive ping policy so far.
pingStrikes uint8
// Flag to signify that number of ping strikes should be reset to 0.
// This is set whenever data or header frames are sent.
// 1 means yes.
resetPingStrikes uint32 // Accessed atomically.
initialWindowSize int32
bdpEst *bdpEstimator
maxSendHeaderListSize *uint32
mu sync.Mutex // guard the following
// drainChan is initialized when drain(...) is called the first time.
// After which the server writes out the first GoAway(with ID 2^31-1) frame.
// Then an independent goroutine will be launched to later send the second GoAway.
// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
// Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is
// already underway.
drainChan chan struct{}
state transportState
activeStreams map[uint32]*Stream
// idle is the time instant when the connection went idle.
// This is either the beginning of the connection or when the number of
// RPCs go down to 0.
// When the connection is busy, this value is set to 0.
idle time.Time
// Fields below are for channelz metric collection.
channelzID int64 // channelz unique identification number
czData *channelzData
bufferPool *bufferPool
connectionID uint64
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong.
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
writeBufSize := config.WriteBufferSize
readBufSize := config.ReadBufferSize
maxHeaderListSize := defaultServerMaxHeaderListSize
if config.MaxHeaderListSize != nil {
maxHeaderListSize = *config.MaxHeaderListSize
}
framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
// Send initial settings as connection preface to client.
isettings := []http2.Setting{{
ID: http2.SettingMaxFrameSize,
Val: http2MaxFrameLen,
}}
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
// permitted in the HTTP2 spec.
maxStreams := config.MaxStreams
if maxStreams == 0 {
maxStreams = math.MaxUint32
} else {
isettings = append(isettings, http2.Setting{
ID: http2.SettingMaxConcurrentStreams,
Val: maxStreams,
})
}
dynamicWindow := true
iwz := int32(initialWindowSize)
if config.InitialWindowSize >= defaultWindowSize {
iwz = config.InitialWindowSize
dynamicWindow = false
}
icwz := int32(initialWindowSize)
if config.InitialConnWindowSize >= defaultWindowSize {
icwz = config.InitialConnWindowSize
dynamicWindow = false
}
if iwz != defaultWindowSize {
isettings = append(isettings, http2.Setting{
ID: http2.SettingInitialWindowSize,
Val: uint32(iwz)})
}
if config.MaxHeaderListSize != nil {
isettings = append(isettings, http2.Setting{
ID: http2.SettingMaxHeaderListSize,
Val: *config.MaxHeaderListSize,
})
}
if config.HeaderTableSize != nil {
isettings = append(isettings, http2.Setting{
ID: http2.SettingHeaderTableSize,
Val: *config.HeaderTableSize,
})
}
if err := framer.fr.WriteSettings(isettings...); err != nil {
return nil, connectionErrorf(false, err, "transport: %v", err)
}
// Adjust the connection flow control window if needed.
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := framer.fr.WriteWindowUpdate(0, delta); err != nil {
return nil, connectionErrorf(false, err, "transport: %v", err)
}
}
kp := config.KeepaliveParams
if kp.MaxConnectionIdle == 0 {
kp.MaxConnectionIdle = defaultMaxConnectionIdle
}
if kp.MaxConnectionAge == 0 {
kp.MaxConnectionAge = defaultMaxConnectionAge
}
// Add a jitter to MaxConnectionAge.
kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge)
if kp.MaxConnectionAgeGrace == 0 {
kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace
}
if kp.Time == 0 {
kp.Time = defaultServerKeepaliveTime
}
if kp.Timeout == 0 {
kp.Timeout = defaultServerKeepaliveTimeout
}
kep := config.KeepalivePolicy
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
}
done := make(chan struct{})
t := &http2Server{
ctx: context.Background(),
done: done,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: config.AuthInfo,
framer: framer,
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
maxStreams: maxStreams,
inTapHandle: config.InTapHandle,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
activeStreams: make(map[uint32]*Stream),
stats: config.StatsHandler,
kp: kp,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
czData: new(channelzData),
bufferPool: newBufferPool(),
}
t.controlBuf = newControlBuffer(t.done)
if dynamicWindow {
t.bdpEst = &bdpEstimator{
bdp: initialWindowSize,
updateFlowControl: t.updateFlowControl,
}
}
if t.stats != nil {
t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{}
t.stats.HandleConn(t.ctx, connBegin)
}
if channelz.IsOn() {
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
}
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
t.framer.writer.Flush()
defer func() {
if err != nil {
t.Close()
}
}()
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
}
if !bytes.Equal(preface, clientPreface) {
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
}
frame, err := t.framer.fr.ReadFrame()
if err == io.EOF || err == io.ErrUnexpectedEOF {
return nil, err
}
if err != nil {
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
}
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
}
t.handleSettings(sf)
go func() {
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
if err := t.loopy.run(); err != nil {
if logger.V(logLevel) {
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
}
}
t.conn.Close()
close(t.writerDone)
}()
go t.keepalive()
return t, nil
}
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
streamID := frame.Header().StreamID
state := &decodeState{
serverSide: true,
}
if err := state.decodeHeader(frame); err != nil {
if se, ok := status.FromError(err); ok {
t.controlBuf.put(&cleanupStream{
streamID: streamID,
rst: true,
rstCode: statusCodeConvTab[se.Code()],
onWrite: func() {},
})
}
return false
}
buf := newRecvBuffer()
s := &Stream{
id: streamID,
st: t,
buf: buf,
fc: &inFlow{limit: uint32(t.initialWindowSize)},
recvCompress: state.data.encoding,
method: state.data.method,
contentSubtype: state.data.contentSubtype,
}
if frame.StreamEnded() {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
}
if state.data.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
} else {
s.ctx, s.cancel = context.WithCancel(t.ctx)
}
pr := &peer.Peer{
Addr: t.remoteAddr,
}
// Attach Auth info if there is any.
if t.authInfo != nil {
pr.AuthInfo = t.authInfo
}
s.ctx = peer.NewContext(s.ctx, pr)
// Attach the received metadata to the context.
if len(state.data.mdata) > 0 {
s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
}
if state.data.statsTags != nil {
s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
}
if state.data.statsTrace != nil {
s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
}
if t.inTapHandle != nil {
var err error
info := &tap.Info{
FullMethodName: state.data.method,
}
s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil {
if logger.V(logLevel) {
logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
}
t.controlBuf.put(&cleanupStream{
streamID: s.id,
rst: true,
rstCode: http2.ErrCodeRefusedStream,
onWrite: func() {},
})
s.cancel()
return false
}
}
t.mu.Lock()
if t.state != reachable {
t.mu.Unlock()
s.cancel()
return false
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
t.controlBuf.put(&cleanupStream{
streamID: streamID,
rst: true,
rstCode: http2.ErrCodeRefusedStream,
onWrite: func() {},
})
s.cancel()
return false
}
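	// Client-initiated HTTP/2 streams must use odd IDs, strictly increasing
	// on a connection; anything else is a protocol violation, so
	// operateHeaders reports it as fatal and the transport is closed.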
if streamID%2 != 1 || streamID <= t.maxStreamID {
t.mu.Unlock()
// illegal gRPC stream id.
if logger.V(logLevel) {
logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
}
s.cancel()
return true
}
t.maxStreamID = streamID
t.activeStreams[streamID] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
t.mu.Unlock()
if channelz.IsOn() {
atomic.AddInt64(&t.czData.streamsStarted, 1)
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
}
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
s.ctx = traceCtx(s.ctx, s.method)
if t.stats != nil {
s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
inHeader := &stats.InHeader{
FullMethod: s.method,
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
Compression: s.recvCompress,
WireLength: int(frame.Header().Length),
Header: metadata.MD(state.data.mdata).Copy(),
}
t.stats.HandleRPC(s.ctx, inHeader)
}
s.ctxDone = s.ctx.Done()
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctxDone,
recv: s.buf,
freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
},
}
// Register the stream with loopy.
t.controlBuf.put(®isterStream{
streamID: s.id,
wq: s.wq,
})
handle(s)
return false
}
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
defer close(t.readerDone)
for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
if logger.V(logLevel) {
logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
}
t.mu.Lock()
s := t.activeStreams[se.StreamID]
t.mu.Unlock()
if s != nil {
t.closeStream(s, true, se.Code, false)
} else {
t.controlBuf.put(&cleanupStream{
streamID: se.StreamID,
rst: true,
rstCode: se.Code,
onWrite: func() {},
})
}
continue
}
if err == io.EOF || err == io.ErrUnexpectedEOF {
t.Close()
return
}
if logger.V(logLevel) {
logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
}
t.Close()
return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
if t.operateHeaders(frame, handle, traceCtx) {
t.Close()
break
}
case *http2.DataFrame:
t.handleData(frame)
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
t.handleSettings(frame)
case *http2.PingFrame:
t.handlePing(frame)
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
case *http2.GoAwayFrame:
// TODO: Handle GoAway from the client appropriately.
default:
if logger.V(logLevel) {
logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
}
}
}
}
func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
t.mu.Lock()
defer t.mu.Unlock()
if t.activeStreams == nil {
// The transport is closing.
return nil, false
}
s, ok := t.activeStreams[f.Header().StreamID]
if !ok {
// The stream is already done.
return nil, false
}
return s, true
}
// adjustWindow sends out an extra window update over the initial window size
// of the stream if the application is requesting more data than the window
// currently allows.
func (t *http2Server) adjustWindow(s *Stream, n uint32) {
if w := s.fc.maybeAdjust(n); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
}
}
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates are delivered to the controller for sending when the
// cumulative quota exceeds the corresponding threshold.
func (t *http2Server) updateWindow(s *Stream, n uint32) {
if w := s.fc.onRead(n); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
increment: w,
})
}
}
// updateFlowControl updates the incoming flow control windows
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Server) updateFlowControl(n uint32) {
t.mu.Lock()
for _, s := range t.activeStreams {
s.fc.newLimit(n)
}
t.initialWindowSize = int32(n)
t.mu.Unlock()
t.controlBuf.put(&outgoingWindowUpdate{
streamID: 0,
increment: t.fc.newLimit(n),
})
t.controlBuf.put(&outgoingSettings{
ss: []http2.Setting{
{
ID: http2.SettingInitialWindowSize,
Val: n,
},
},
})
}
func (t *http2Server) handleData(f *http2.DataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
sendBDPPing = t.bdpEst.add(size)
}
// Decouple connection's flow control from application's read.
// An update on connection's flow control should not depend on
// whether user application has read the data or not. Such a
// restriction is already imposed on the stream's flow control,
// and therefore the sender will be blocked anyways.
// Decoupling the connection flow control will prevent other
// active(fast) streams from starving in presence of slow or
// inactive streams.
if w := t.fc.onData(size); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{
streamID: 0,
increment: w,
})
}
if sendBDPPing {
// Avoid excessive ping detection (e.g. in an L7 proxy)
// by sending a window update prior to the BDP ping.
if w := t.fc.reset(); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{
streamID: 0,
increment: w,
})
}
t.controlBuf.put(bdpPing)
}
// Select the right stream to dispatch.
s, ok := t.getStream(f)
if !ok {
return
}
if size > 0 {
if err := s.fc.onData(size); err != nil {
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
return
}
if f.Header().Flags.Has(http2.FlagDataPadded) {
if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
}
}
// TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
buffer := t.bufferPool.get()
buffer.Reset()
buffer.Write(f.Data())
s.write(recvMsg{buffer: buffer})
}
}
if f.Header().Flags.Has(http2.FlagDataEndStream) {
// Received the end of stream from the client.
s.compareAndSwapState(streamActive, streamReadDone)
s.write(recvMsg{err: io.EOF})
}
}
func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
// If the stream is not deleted from the transport's active streams map, then do a regular close stream.
if s, ok := t.getStream(f); ok {
t.closeStream(s, false, 0, false)
return
}
// If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map.
t.controlBuf.put(&cleanupStream{
streamID: f.Header().StreamID,
rst: false,
rstCode: 0,
onWrite: func() {},
})
}
func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
if f.IsAck() {
return
}
var ss []http2.Setting
var updateFuncs []func()
f.ForeachSetting(func(s http2.Setting) error {
switch s.ID {
case http2.SettingMaxHeaderListSize:
updateFuncs = append(updateFuncs, func() {
t.maxSendHeaderListSize = new(uint32)
*t.maxSendHeaderListSize = s.Val
})
default:
ss = append(ss, s)
}
return nil
})
t.controlBuf.executeAndPut(func(interface{}) bool {
for _, f := range updateFuncs {
f()
}
return true
}, &incomingSettings{
ss: ss,
})
}
const (
maxPingStrikes = 2
defaultPingTimeout = 2 * time.Hour
)
func (t *http2Server) handlePing(f *http2.PingFrame) {
if f.IsAck() {
if f.Data == goAwayPing.data && t.drainChan != nil {
close(t.drainChan)
return
}
// Maybe it's a BDP ping.
if t.bdpEst != nil {
t.bdpEst.calculate(f.Data)
}
return
}
pingAck := &ping{ack: true}
copy(pingAck.data[:], f.Data[:])
t.controlBuf.put(pingAck)
now := time.Now()
defer func() {
t.lastPingAt = now
}()
	// If resetPingStrikes was set, this ping doesn't need to be checked for a
	// policy violation, and the pingStrikes counter should be reset to 0.
if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
t.pingStrikes = 0
return
}
t.mu.Lock()
ns := len(t.activeStreams)
t.mu.Unlock()
if ns < 1 && !t.kep.PermitWithoutStream {
// Keepalive shouldn't be active thus, this new ping should
// have come after at least defaultPingTimeout.
if t.lastPingAt.Add(defaultPingTimeout).After(now) {
t.pingStrikes++
}
} else {
// Check if keepalive policy is respected.
if t.lastPingAt.Add(t.kep.MinTime).After(now) {
t.pingStrikes++
}
}
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
if logger.V(logLevel) {
logger.Errorf("transport: Got too many pings from the client, closing the connection.")
}
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
}
}
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
t.controlBuf.put(&incomingWindowUpdate{
streamID: f.Header().StreamID,
increment: f.Increment,
})
}
func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
for k, vv := range md {
if isReservedHeader(k) {
			// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
continue
}
for _, v := range vv {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
return headerFields
}
func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
if t.maxSendHeaderListSize == nil {
return true
}
hdrFrame := it.(*headerFrame)
var sz int64
for _, f := range hdrFrame.hf {
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
if logger.V(logLevel) {
logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
}
return false
}
}
return true
}
// WriteHeader sends the header metadata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
if s.updateHeaderSent() || s.getState() == streamDone {
return ErrIllegalHeaderWrite
}
s.hdrMu.Lock()
if md.Len() > 0 {
if s.header.Len() > 0 {
s.header = metadata.Join(s.header, md)
} else {
s.header = md
}
}
if err := t.writeHeaderLocked(s); err != nil {
s.hdrMu.Unlock()
return err
}
s.hdrMu.Unlock()
return nil
}
func (t *http2Server) setResetPingStrikes() {
atomic.StoreUint32(&t.resetPingStrikes, 1)
}
func (t *http2Server) writeHeaderLocked(s *Stream) error {
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
if s.sendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
streamID: s.id,
hf: headerFields,
endStream: false,
onWrite: t.setResetPingStrikes,
})
if !success {
if err != nil {
return err
}
t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation
}
if t.stats != nil {
// Note: Headers are compressed with hpack after this call returns.
// No WireLength field is set here.
outHeader := &stats.OutHeader{
Header: s.header.Copy(),
Compression: s.sendCompress,
}
t.stats.HandleRPC(s.Context(), outHeader)
}
return nil
}
// WriteStatus sends stream status to the client and terminates the stream.
// No further I/O operations can be performed on this stream.
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
if s.getState() == streamDone {
return nil
}
s.hdrMu.Lock()
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
if !s.updateHeaderSent() { // No headers have been sent.
if len(s.header) > 0 { // Send a separate header frame.
if err := t.writeHeaderLocked(s); err != nil {
s.hdrMu.Unlock()
return err
}
} else { // Send a trailer only response.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
}
}
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
if p := st.Proto(); p != nil && len(p.Details) > 0 {
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
} else {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
}
}
// Attach the trailer metadata.
headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
trailingHeader := &headerFrame{
streamID: s.id,
hf: headerFields,
endStream: true,
onWrite: t.setResetPingStrikes,
}
s.hdrMu.Unlock()
success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
if !success {
if err != nil {
return err
}
t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation
}
// Send a RST_STREAM after the trailers if the client has not already half-closed.
rst := s.getState() == streamActive
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
if t.stats != nil {
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
Trailer: s.trailer.Copy(),
})
}
return nil
}
// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
// error is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.WriteHeader(s, nil); err != nil {
if _, ok := err.(ConnectionError); ok {
return err
}
// TODO(mmukhi, dfawley): Make sure this is the right code to return.
return status.Errorf(codes.Internal, "transport: %v", err)
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
s.cancel()
select {
case <-t.done:
return ErrConnClosing
default:
}
return ContextErr(s.ctx.Err())
}
}
df := &dataFrame{
streamID: s.id,
h: hdr,
d: data,
onEachWrite: t.setResetPingStrikes,
}
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
select {
case <-t.done:
return ErrConnClosing
default:
}
return ContextErr(s.ctx.Err())
}
return t.controlBuf.put(df)
}
// keepalive running in a separate goroutine does the following:
// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
// after an additional duration of keepalive.Timeout.
func (t *http2Server) keepalive() {
p := &ping{}
// True iff a ping has been sent, and no data has been received since then.
outstandingPing := false
// Amount of time remaining before which we should receive an ACK for the
// last sent ping.
kpTimeoutLeft := time.Duration(0)
// Records the last value of t.lastRead before we go block on the timer.
// This is required to check for read activity since then.
prevNano := time.Now().UnixNano()
// Initialize the different timers to their default values.
idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
kpTimer := time.NewTimer(t.kp.Time)
defer func() {
// We need to drain the underlying channel in these timers after a call
// to Stop(), only if we are interested in resetting them. Clearly we
// are not interested in resetting them here.
idleTimer.Stop()
ageTimer.Stop()
kpTimer.Stop()
}()
for {
select {
case <-idleTimer.C:
t.mu.Lock()
idle := t.idle
if idle.IsZero() { // The connection is non-idle.
t.mu.Unlock()
idleTimer.Reset(t.kp.MaxConnectionIdle)
continue
}
val := t.kp.MaxConnectionIdle - time.Since(idle)
t.mu.Unlock()
if val <= 0 {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
t.drain(http2.ErrCodeNo, []byte{})
return
}
idleTimer.Reset(val)
case <-ageTimer.C:
t.drain(http2.ErrCodeNo, []byte{})
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
select {
case <-ageTimer.C:
// Close the connection after grace period.
if logger.V(logLevel) {
logger.Infof("transport: closing server transport due to maximum connection age.")
}
t.Close()
case <-t.done:
}
return
case <-kpTimer.C:
lastRead := atomic.LoadInt64(&t.lastRead)
if lastRead > prevNano {
// There has been read activity since the last time we were
// here. Setup the timer to fire at kp.Time seconds from
// lastRead time and continue.
outstandingPing = false
kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
prevNano = lastRead
continue
}
if outstandingPing && kpTimeoutLeft <= 0 {
if logger.V(logLevel) {
logger.Infof("transport: closing server transport due to idleness.")
}
t.Close()
return
}
if !outstandingPing {
if channelz.IsOn() {
atomic.AddInt64(&t.czData.kpCount, 1)
}
t.controlBuf.put(p)
kpTimeoutLeft = t.kp.Timeout
outstandingPing = true
}
// The amount of time to sleep here is the minimum of kp.Time and
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
kpTimeoutLeft -= sleepDuration
kpTimer.Reset(sleepDuration)
case <-t.done:
return
}
}
}
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
func (t *http2Server) Close() error {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
return errors.New("transport: Close() was already called")
}
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
t.mu.Unlock()
t.controlBuf.finish()
close(t.done)
err := t.conn.Close()
if channelz.IsOn() {
channelz.RemoveEntry(t.channelzID)
}
// Cancel all active streams.
for _, s := range streams {
s.cancel()
}
if t.stats != nil {
connEnd := &stats.ConnEnd{}
t.stats.HandleConn(t.ctx, connEnd)
}
return err
}
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; ok {
delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
}
}
t.mu.Unlock()
if channelz.IsOn() {
if eosReceived {
atomic.AddInt64(&t.czData.streamsSucceeded, 1)
} else {
atomic.AddInt64(&t.czData.streamsFailed, 1)
}
}
}
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
oldState := s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
return
}
hdr.cleanup = &cleanupStream{
streamID: s.id,
rst: rst,
rstCode: rstCode,
onWrite: func() {
t.deleteStream(s, eosReceived)
},
}
t.controlBuf.put(hdr)
}
// closeStream clears the footprint of a stream when the stream is not needed any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
s.swapState(streamDone)
t.deleteStream(s, eosReceived)
t.controlBuf.put(&cleanupStream{
streamID: s.id,
rst: rst,
rstCode: rstCode,
onWrite: func() {},
})
}
func (t *http2Server) RemoteAddr() net.Addr {
return t.remoteAddr
}
func (t *http2Server) Drain() {
t.drain(http2.ErrCodeNo, []byte{})
}
func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
t.mu.Lock()
defer t.mu.Unlock()
if t.drainChan != nil {
return
}
t.drainChan = make(chan struct{})
t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
}
var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
// Handles outgoing GoAway and returns true if loopy needs to put itself
// in draining mode.
func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
t.mu.Lock()
if t.state == closing { // TODO(mmukhi): This seems unnecessary.
t.mu.Unlock()
// The transport is closing.
return false, ErrConnClosing
}
sid := t.maxStreamID
if !g.headsUp {
// Stop accepting more streams now.
t.state = draining
if len(t.activeStreams) == 0 {
g.closeConn = true
}
t.mu.Unlock()
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
if g.closeConn {
// Abruptly close the connection following the GoAway (via
// loopywriter). But flush out what's inside the buffer first.
t.framer.writer.Flush()
return false, fmt.Errorf("transport: Connection closing")
}
return true, nil
}
t.mu.Unlock()
	// For a graceful close, send out a GoAway with a stream ID of MaxUInt32,
	// then follow that with a ping and wait for the ack to come back or a
	// timer to expire. During this time, accept new streams since they might
	// have originated before the GoAway reached the client. After getting the
	// ack or on timer expiration, send out another GoAway, this time with an
	// ID of the max stream the server intends to process.
if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
return false, err
}
if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
return false, err
}
go func() {
timer := time.NewTimer(time.Minute)
defer timer.Stop()
select {
case <-t.drainChan:
case <-timer.C:
case <-t.done:
return
}
t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
}()
return false, nil
}
func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
s := channelz.SocketInternalMetric{
StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
LocalFlowControlWindow: int64(t.fc.getSize()),
SocketOptions: channelz.GetSocketOption(t.conn),
LocalAddr: t.localAddr,
RemoteAddr: t.remoteAddr,
// RemoteName :
}
if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
s.Security = au.GetSecurityValue()
}
s.RemoteFlowControlWindow = t.getOutFlowWindow()
return &s
}
func (t *http2Server) IncrMsgSent() {
atomic.AddInt64(&t.czData.msgSent, 1)
atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
}
func (t *http2Server) IncrMsgRecv() {
atomic.AddInt64(&t.czData.msgRecv, 1)
atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
}
func (t *http2Server) getOutFlowWindow() int64 {
resp := make(chan uint32, 1)
timer := time.NewTimer(time.Second)
defer timer.Stop()
t.controlBuf.put(&outFlowControlSizeRequest{resp})
select {
case sz := <-resp:
return int64(sz)
case <-t.done:
return -1
case <-timer.C:
return -2
}
}
func getJitter(v time.Duration) time.Duration {
if v == infinity {
return 0
}
// Generate a jitter between +/- 10% of the value.
r := int64(v / 10)
j := grpcrand.Int63n(2*r) - r
return time.Duration(j)
}
|
SELECT SIN(0) FROM db_root;
SELECT ROUND(SIN(1)) FROM db_root;
SELECT SIN('0') FROM db_root;
SELECT SIN('a') FROM db_root;
SELECT SIN(NULL) FROM db_root;
select cast(sin(123456789012345.12345) as numeric(6,5)) from db_root;
|
<?php
namespace Locale\Database;
use Illuminate\Database\Eloquent\Builder;
use Locale\Models\Localizable;
class LocalizableBuilder extends Builder
{
/**
* @var bool
*/
protected $translationJoined = false;
/**
* Add an "order by" clause to the query.
*
* @param string $column
* @param string $direction
* @return Builder|LocalizableBuilder
*/
public function orderBy($column, $direction = 'asc')
{
/** @var Localizable $localizableModel */
$localizableModel = $this->model;
if ($localizableModel->isLocalizableAttribute($column)) {
$this->joinWithTranslation();
}
return parent::orderBy($column, $direction);
}
/**
* @since 1.0.0
*/
    public function joinWithTranslation()
    {
        if (!$this->translationJoined) {
            /** @var Localizable $localizableModel */
            $localizableModel = $this->model;
            $localeTable = config("locale.model");
            $modelTable = $localizableModel->getTable();
            $modelKeyName = $localizableModel->getKeyName();
            $joiningTable = $localizableModel->joiningLocaleTable($localeTable, $modelTable);
            $modelForeignKey = $localizableModel->getModelForeignKey();
            $this->join($joiningTable, "{$modelTable}.{$modelKeyName}", "=", "{$joiningTable}.{$modelForeignKey}");
            $this->translationJoined = true; // guard against joining the translation table twice
        }
    }
}
|
package com.tradingview.lightweightcharts.api.series.models
import androidx.annotation.ColorInt
import com.google.gson.annotations.JsonAdapter
import com.tradingview.lightweightcharts.api.series.common.SeriesData
data class HistogramData(
override val time: Time,
val value: Float,
/**
* Optional color value for certain data item. If missed, color from HistogramSeriesOptions is used
*/
@ColorInt
@JsonAdapter(ColorAdapter::class)
val color: IntColor? = null
): SeriesData |
package dawg
import (
"crypto/rand"
"encoding/json"
"fmt"
"os"
"github.com/jtacoma/uritemplates"
)
type URITemplate uritemplates.UriTemplate
func (t *URITemplate) UnmarshalJSON(b []byte) error {
	// b is raw JSON (a quoted string), so decode it to a plain string
	// before handing it to the template parser.
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	tpl, err := uritemplates.Parse(s)
	if err != nil {
		return err
	}
	*t = URITemplate(*tpl)
	return nil
}
//
// {
// "myservice": {
// "template": "https://foo/bar/{id}/{kind}",
// "substitutions": {
// "myshortcut": {
// "id": "5000",
// "kind": "funny"
// }
// }
// }
// }
type Config map[string]*ServiceConfig
func (c Config) GetService(service string) (ServiceConfig, error) {
if s, ok := c[service]; ok {
return *s, nil
} else {
return ServiceConfig{}, fmt.Errorf("service '%s' not found", service)
}
}
type ServiceConfig struct {
GUID string `json:"-"`
Template URITemplate `json:"template"`
Keyword string `json:"keyword"`
Substitutions map[string]map[string]interface{} `json:"substitutions"`
}
func (s *ServiceConfig) GetURL(shortcut string) (string, error) {
var shortcutVars map[string]interface{}
var ok bool
if shortcutVars, ok = s.Substitutions[shortcut]; !ok {
return "", fmt.Errorf("shortcut '%s' not found", shortcut)
}
casted := uritemplates.UriTemplate(s.Template)
if expanded, err := casted.Expand(shortcutVars); err != nil {
return "", fmt.Errorf("could not expand URL template: %v", err)
} else {
return expanded, nil
}
}
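// Example (sketch, using the config shown above): for service "myservice",
// GetURL("myshortcut") expands the template to "https://foo/bar/5000/funny".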
func (s *ServiceConfig) Shortcuts() []string {
shortcuts := make([]string, 0, len(s.Substitutions))
	for c := range s.Substitutions {
shortcuts = append(shortcuts, c)
}
return shortcuts
}
func ReadConfig(path string) (Config, error) {
file, err := os.Open(path)
if err != nil {
return Config{}, fmt.Errorf("could not open config file: %s", err)
}
defer file.Close()
var cfg Config
dec := json.NewDecoder(file)
if err := dec.Decode(&cfg); err != nil {
return Config{}, fmt.Errorf("could not parse config file: %s", err)
}
	for svc := range cfg {
cfg[svc].GUID = GUID() // randomly assign guids for alfred workflow objects
}
return cfg, nil
}
func GUID() string {
u := make([]byte, 16)
_, err := rand.Read(u)
if err != nil {
panic(err)
}
u[6] = (u[6] & 0x0f) | 0x40 // Version 4
u[8] = (u[8] & 0x3f) | 0x80 // Variant is 10
return fmt.Sprintf("%X-%X-%X-%X-%X", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
}
|
class Array
def find_by(conditions)
find do |i|
conditions.all? do |key, value|
i.respond_to?(key) && (i.__send__(key) == value)
end
end
end
end
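# Example:
#   Point = Struct.new(:x, :y)
#   [Point.new(1, 2), Point.new(3, 4)].find_by(x: 3) # => #<struct Point x=3, y=4>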
|
package com.letter.days.view
import android.content.Context
import android.graphics.Canvas
import android.graphics.Color
import android.graphics.Paint
import android.util.AttributeSet
import android.view.View
import com.letter.days.R
import android.content.dp2px
import kotlin.math.min
import kotlin.properties.Delegates
/**
 * Progress View
 * @property max Int maximum progress value
 * @property progress Int current progress value
 * @property text String? text to display
 * @property textSize Float text size
 * @property textColor Int text color
 * @property strokeWidth Float progress stroke width
 * @property strokeColor Int progress stroke color
 * @property strokeBackground Int progress stroke background color
 * @property background Int background color
 * @property fitWidth Boolean whether the height should follow the width
 * @property diameter Int diameter
 * @property backgroundPaint Paint background paint
 * @property textPaint Paint text paint
 * @property strokeBackgroundPaint Paint stroke background paint
 * @property strokePaint Paint progress stroke paint
 * @constructor constructor
 *
 * @author Letter([email protected])
 * @since 1.0.0
 */
class ProgressView @JvmOverloads
constructor(context: Context, attrs: AttributeSet?=null, defStyleAttr: Int=0, defStyleRes: Int=0)
: View(context, attrs, defStyleAttr, defStyleRes){
var max = 0
set(value) {
field = value
invalidate()
}
var progress = 0
set(value) {
field = value
invalidate()
}
var text: String? = null
set(value) {
field = value
invalidate()
}
var textSize = 0f
set(value) {
field = value
invalidate()
}
var textColor = 0
set(value) {
field = value
invalidate()
}
var strokeWidth = 0f
set(value) {
field = value
invalidate()
}
var strokeColor = 0
set(value) {
field = value
invalidate()
}
private var strokeBackground by Delegates.notNull<Int>()
private var background by Delegates.notNull<Int>()
private var fitWidth by Delegates.notNull<Boolean>()
private var diameter = 0
private val backgroundPaint = Paint()
private val textPaint = Paint()
private val strokeBackgroundPaint = Paint()
private val strokePaint = Paint()
init {
val attrArray = context.obtainStyledAttributes(attrs, R.styleable.ProgressView)
textSize = attrArray.getDimension(R.styleable.ProgressView_android_textSize, context.dp2px(14))
textColor = attrArray.getColor(R.styleable.ProgressView_android_textColor, Color.BLACK)
strokeWidth = attrArray.getDimension(R.styleable.ProgressView_strokeWidth, context.dp2px(2))
strokeColor = attrArray.getColor(R.styleable.ProgressView_strokeColor, Color.TRANSPARENT)
strokeBackground = attrArray.getColor(R.styleable.ProgressView_strokeBackground, Color.TRANSPARENT)
background = attrArray.getColor(R.styleable.ProgressView_android_background, Color.TRANSPARENT)
fitWidth = attrArray.getBoolean(R.styleable.ProgressView_fitWidth, true)
max = attrArray.getInt(R.styleable.ProgressView_android_max, 100)
progress = attrArray.getInt(R.styleable.ProgressView_android_progress, 0)
text = attrArray.getString(R.styleable.ProgressView_android_text)
attrArray.recycle()
}
    /**
     * Draw the view
     * @param canvas Canvas the canvas to draw on
     */
override fun onDraw(canvas: Canvas?) {
super.onDraw(canvas)
diameter = min(width, height)
        /* Draw the background */
backgroundPaint.color = background
backgroundPaint.style = Paint.Style.FILL
backgroundPaint.isAntiAlias = true
canvas?.drawCircle(width.toFloat() / 2, height.toFloat() / 2,
(min(width, height) / 2 - if (strokeWidth > 0) 1 else 0).toFloat(), backgroundPaint)
        /* Draw the stroke background */
strokeBackgroundPaint.color = strokeBackground
strokeBackgroundPaint.style = Paint.Style.STROKE
strokeBackgroundPaint.isAntiAlias = true
strokeBackgroundPaint.strokeWidth = strokeWidth
canvas?.drawCircle(width.toFloat() / 2, height.toFloat() / 2,
(diameter / 2).toFloat() - (strokeWidth) / 2, strokeBackgroundPaint)
        /* Draw the progress stroke */
strokePaint.color = strokeColor
strokePaint.style = Paint.Style.STROKE
strokePaint.isAntiAlias = true
strokePaint.strokeWidth = strokeWidth
canvas?.drawArc(((width - diameter) / 2).toFloat() + (strokeWidth) / 2,
((height - diameter) / 2).toFloat() + (strokeWidth) / 2,
(width - (width - diameter) / 2).toFloat() - (strokeWidth) / 2,
(height - (height - diameter) / 2).toFloat() - (strokeWidth) / 2,
-90f,
(progress.toFloat() * 360 / max.toFloat()),
false,
strokePaint)
        /* Draw the text */
if (text != null) {
textPaint.color = textColor
textPaint.style = Paint.Style.FILL
textPaint.isAntiAlias = true
textPaint.textSize = textSize
textPaint.textAlign = Paint.Align.CENTER
val fontMetricsInt = textPaint.fontMetricsInt
canvas?.drawText(text!!,
(width / 2).toFloat(),
(height / 2 - fontMetricsInt.descent + (fontMetricsInt.bottom - fontMetricsInt.top) / 2).toFloat(),
textPaint)
}
}
    /**
     * Measure the view size
     * @param widthMeasureSpec widthMeasureSpec
     * @param heightMeasureSpec heightMeasureSpec
     */
override fun onMeasure(widthMeasureSpec: Int, heightMeasureSpec: Int) {
super.onMeasure(widthMeasureSpec, heightMeasureSpec)
val width = MeasureSpec.getSize(widthMeasureSpec)
val height = MeasureSpec.makeMeasureSpec(width, MeasureSpec.EXACTLY)
setMeasuredDimension(measureSize(widthMeasureSpec),
if (fitWidth) height else measureSize(heightMeasureSpec))
}
    /**
     * Measure one dimension
     * @param measureSpec measureSpec
     * @return the measured size
     */
private fun measureSize(measureSpec: Int): Int {
val specMode = MeasureSpec.getMode(measureSpec)
val specSize = MeasureSpec.getSize(measureSpec)
return (if (specMode == MeasureSpec.EXACTLY) specSize
else (context.resources.displayMetrics.density * 50).toInt())
}
} |
var gulp = require('gulp');
var concat = require('gulp-concat');
var sass = require('gulp-sass');
var minifyCss = require('gulp-minify-css');
var watch = require('gulp-watch');
var STATIC_DEV = "./static-dev/";
var STATIC = './static/';
gulp.task('scripts', function() {
return gulp.src([
'lib/jquery/dist/jquery.js',
'lib/d3/d3.min.js',
'lib/colorbrewer/colorbrewer.js',
'lib/bootstrap-sass-official/assets/javascripts/bootstrap.min.js',
'lib/bootstrap-multiselect/dist/js/bootstrap-multiselect.js',
'lib/modernizr/modernizr.js',
'js/vis.js',
'js/main.js',
], {cwd: STATIC_DEV})
.pipe(concat('scripts.js'))
.pipe(gulp.dest(STATIC));
});
gulp.task('sass', function() {
return gulp.src(STATIC_DEV + 'scss/main.scss')
.pipe(sass())
.pipe(minifyCss())
.pipe(gulp.dest(STATIC));
});
gulp.task('favicon', function(){
return gulp.src(STATIC_DEV + 'favicon.ico').pipe(gulp.dest(STATIC));
});
// remove all the csv stuff after we add in the csv api
gulp.task('csv', function() {
return gulp.src(STATIC_DEV + 'csv/*.csv').pipe(gulp.dest(STATIC));
});
gulp.task('default', ['sass', 'scripts', 'favicon', 'csv'], function() {
watch([STATIC_DEV + '**/*.scss', STATIC_DEV + '**/*.js', STATIC_DEV + '**/*.csv'], ['sass', 'scripts', 'csv']);
});
|
<?php // rnsignup.php (YUI version)
include_once 'rnheader.php';
echo <<<_END
<script src="yahoo-min.js"></script>
<script src="event-min.js"></script>
<script src="connection-min.js"></script>
<script>
function checkUser(user)
{
if (user.value == '')
{
document.getElementById('info').innerHTML = ''
return
}
params = "user=" + user.value
callback = { success:successHandler, failure:failureHandler }
request = YAHOO.util.Connect.asyncRequest('POST',
'rncheckuser.php', callback, params);
}
function successHandler(o)
{
document.getElementById('info').innerHTML = o.responseText;
}
function failureHandler(o)
{
document.getElementById('info').innerHTML =
o.status + " " + o.statusText;
}
</script>
<h3>Sign up Form</h3>
_END;
$error = $user = $pass = "";
if (isset($_SESSION['user'])) destroySession();
if (isset($_POST['user']))
{
$user = sanitizeString($_POST['user']);
$pass = sanitizeString($_POST['pass']);
if ($user == "" || $pass == "")
{
$error = "Not all fields were entered<br /><br />";
}
else
{
$query = "SELECT * FROM rnmembers WHERE user='$user'";
if (mysql_num_rows(queryMysql($query)))
{
$error = "That username already exists<br /><br />";
}
else
{
$query = "INSERT INTO rnmembers VALUES('$user', '$pass')";
            queryMysql($query);
            die("<h4>Account created</h4>Please Log in."); // report success only when the insert actually ran
        }
}
}
echo <<<_END
<form method='post' action='rnsignup.php'>$error
Username <input type='text' maxlength='16' name='user' value='$user'
onBlur='checkUser(this)'/><span id='info'></span><br />
Password <input type='text' maxlength='16' name='pass'
value='$pass' /><br />
<input type='submit' value='Signup' />
</form>
_END;
?>
|
๏ปฟ
namespace PrimeWeb.HpTypes;
public class HpVars
{
//TODO: Type implementation - CALChpvars
}
|
<?php
namespace Mnvx\PProcess\Command;
/**
* Description of command set for assertions
*/
class CommandSet
{
/**
* @var string[] Commands for testing
*/
protected $commands;
/**
* @var string Path where commands will be run
*/
protected $path;
/**
* @var int How many times to run commands
*/
protected $count;
/**
* Command constructor.
* @param string[] $commands Commands for testing
* @param string $path Path where commands will be run
* @param int|null $count How many times to run commands
*/
public function __construct(array $commands, string $path = null, int $count = null)
{
$this->commands = $commands;
$this->path = $path;
$this->count = $count;
}
/**
* @return string[]
*/
public function getCommands()
{
return $this->commands;
}
/**
* @return string
*/
public function getPath()
{
return $this->path;
}
/**
* @return int
*/
public function getCount()
{
return $this->count;
}
public function __toString()
{
return \sprintf(
'%s (path: %s, count: %s)',
implode(', ', $this->getCommands()),
$this->getPath() ?: 'not set',
$this->getCount() ?: 'not set'
);
}
} |
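// Example usage (sketch, not part of the original class):
//   $set = new CommandSet(['php -v', 'composer --version'], '/tmp', 2);
//   echo $set; // php -v, composer --version (path: /tmp, count: 2)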
#!/bin/bash
WORK_DIR=$(git rev-parse --show-toplevel)/build
CURRENT_DIR=$(pwd)
OUTPUT_DIR=$WORK_DIR/opencv
if [[ -d $OUTPUT_DIR ]]; then
rm -rf $OUTPUT_DIR
fi
if [[ ! -d $WORK_DIR ]]; then
mkdir -p $WORK_DIR
fi
cd $WORK_DIR
# download opencv if not already downloaded
if [[ ! -d opencv-4.5.2 ]]; then
if [[ ! $( which wget ) ]]; then
echo "wget is not available, please install it e.g. `$ brew install wget`"
exit 1
fi
wget https://github.com/opencv/opencv/archive/refs/tags/4.5.2.tar.gz
tar xzf 4.5.2.tar.gz
rm 4.5.2.tar.gz
fi
cd opencv-4.5.2/
if [[ ! -d build ]]; then
mkdir build
else
rm -f build/CMakeCache.txt
fi
cd build
# build opencv minimal (just core and imgproc)
cmake .. \
-DCMAKE_OSX_DEPLOYMENT_TARGET=10.13 \
-DBUILD_SHARED_LIBS=OFF \
-DBUILD_opencv_apps=OFF \
-DBUILD_opencv_js=OFF \
-DBUILD_ANDROID_PROJECTS=OFF \
-DBUILD_ANDROID_EXAMPLES=OFF \
-DBUILD_DOCS=OFF \
-DBUILD_EXAMPLES=OFF \
-DBUILD_PACKAGE=OFF \
-DBUILD_PERF_TESTS=OFF \
-DBUILD_TESTS=OFF \
-DBUILD_WITH_DEBUG_INFO=OFF \
-DBUILD_WITH_STATIC_CRT=OFF \
-DBUILD_WITH_DYNAMIC_IPP=OFF \
-DBUILD_FAT_JAVA_LIB=OFF \
-DBUILD_ANDROID_SERVICE=OFF \
-DBUILD_CUDA_STUBS=OFF \
-DBUILD_JAVA=OFF \
-DBUILD_OBJC=OFF \
-DBUILD_opencv_python3=OFF \
-DINSTALL_CREATE_DISTRIB=OFF \
-DINSTALL_BIN_EXAMPLES=OFF \
-DINSTALL_C_EXAMPLES=OFF \
-DINSTALL_PYTHON_EXAMPLES=OFF \
-DINSTALL_ANDROID_EXAMPLES=OFF \
-DINSTALL_TO_MANGLED_PATHS=OFF \
-DINSTALL_TESTS=OFF \
-DBUILD_opencv_calib3d=OFF \
-DBUILD_opencv_core=ON \
-DBUILD_opencv_dnn=OFF \
-DBUILD_opencv_features2d=OFF \
-DBUILD_opencv_flann=OFF \
-DBUILD_opencv_gapi=OFF \
-DBUILD_opencv_highgui=OFF \
-DBUILD_opencv_imgcodecs=OFF \
-DBUILD_opencv_imgproc=ON \
-DBUILD_opencv_ml=OFF \
-DBUILD_opencv_objdetect=OFF \
-DBUILD_opencv_photo=OFF \
-DBUILD_opencv_stitching=OFF \
-DBUILD_opencv_video=OFF \
-DBUILD_opencv_videoio=OFF \
-DWITH_PNG=OFF \
-DWITH_JPEG=OFF \
-DWITH_TIFF=OFF \
-DWITH_WEBP=OFF \
-DWITH_OPENJPEG=OFF \
-DWITH_JASPER=OFF \
-DWITH_OPENEXR=OFF \
-DWITH_FFMPEG=OFF \
-DWITH_GSTREAMER=OFF \
-DWITH_1394=OFF \
-DCMAKE_INSTALL_PREFIX=$OUTPUT_DIR \
-DWITH_PROTOBUF=OFF \
-DBUILD_PROTOBUF=OFF \
-DWITH_CAROTENE=OFF \
-DWITH_EIGEN=OFF \
-DWITH_OPENVX=OFF \
-DWITH_CLP=OFF \
-DWITH_DIRECTX=OFF \
-DWITH_VA=OFF \
-DWITH_LAPACK=OFF \
-DWITH_QUIRC=OFF \
-DWITH_ADE=OFF \
&& \
cmake --build . --target install -- -j8
cd "$CURRENT_DIR"
|
# kegg2cyjs
kegg2cyjs converts KEGG pathway XML (KGML) to Cytoscape.js JSON (.cyjs).
|
<?php
namespace Reverb\Reverbsync\Controller\Adminhtml\Reverbsync;
use Magento\Backend\App\Action\Context;
use Magento\Framework\View\Result\PageFactory;
class Staledelete extends \Magento\Backend\App\Action{
protected $resultPageFactory;
public function __construct(Context $context, PageFactory $resultPageFactory) {
parent::__construct($context);
$this->resultPageFactory = $resultPageFactory;
}
public function execute(){
try {
$objectManager = \Magento\Framework\App\ObjectManager::getInstance();
$syncobj = $objectManager->create('\Reverb\ProcessQueue\Model\Cron\Delete\Stale\Successful');
$syncobj->deleteStaleSuccessfulQueueTasks();
echo 'success';
exit;
} catch(\Exception $e){
echo 'logging error = ';
echo $e->getMessage();
}
}
}
?> |
A simple example of using the Digital Ocean [Test Kitchen](http://kitchen.ci) driver
with the shell provisioner.
This sort of setup is ideal for running on a continuous integration
system.
[](https://app.wercker.com/project/bykey/7d06dbda62f83ed243556ea971695632)
## Installation
First, install the dependencies:
```bash
bundle install
```
## Usage
Next run the tests:
```bash
bundle exec kitchen test -c
```
Note that the `-c` option means our tests will all run in parallel. In
the case of this example, that means against Ubuntu 13.10 and 12.04. The
`kitchen list` command will provide a list of the tests to be run.
|
import numpy as np
from bilevel_imaging_toolbox import solvers
from bilevel_imaging_toolbox import plot_utils
### Testing LASSO solver using ISTA method
m,n = 15,20
# Create random A matrix
rng = np.random.RandomState(42)
A = rng.randn(m,n)
# Create initial point
x0 = rng.randn(n)
x0[x0 < 0.9] = 0 # Add sparsity to the initial point
b = np.dot(A,x0)
clambda = 0.9 # Regularization parameter
iters = 200
# Run ISTA Solver
(x_ista,vallog_ista) = solvers.ista_LASSO(x0,A,b,clambda,iters)
# Run FISTA Solver
(x_fista, vallog_fista) = solvers.fista_LASSO(x0,A,b,clambda,iters)
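# FISTA layers Nesterov-style momentum on top of ISTA, improving the objective
# convergence rate from O(1/k) to O(1/k^2), so its curve should drop faster in the plot.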
# Plot error evolution
plot_utils.plot_collection([vallog_ista,vallog_fista],['ISTA','FISTA'],title="LASSO ISTA - FISTA evolution")
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010, 2011 Rocky Bernstein <[email protected]>
require_relative '../base/subcmd'
class Trepan::Subcommand::ShowConfirm < Trepan::ShowBoolSubcommand
unless defined?(HELP)
Trepanning::Subcommand.set_name_prefix(__FILE__, self)
HELP = "Show confirm potentially dangerous operations setting"
MIN_ABBREV = 'co'.size
end
end
if __FILE__ == $0
# Demo it.
require_relative '../../mock'
# FIXME: DRY the below code
dbgr, cmd = MockDebugger::setup('show')
subcommand = Trepan::Subcommand::ShowConfirm.new(cmd)
testcmdMgr = Trepan::Subcmd.new(subcommand)
subcommand.run_show_bool
  name = File.basename(__FILE__, '.rb')
subcommand.summary_help(name)
end
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
"text/template"
"github.com/globalsign/mgo"
"github.com/gorilla/schema"
"github.com/haisum/recaptcha"
"github.com/julienschmidt/httprouter"
"github.com/mariaefi29/blog/config"
"github.com/mariaefi29/blog/models"
"github.com/pkg/errors"
"gopkg.in/gomail.v2"
)
const (
ServerErrorMessage = "ะัะพะธะทะพัะปะฐ ะพัะธะฑะบะฐ ัะตัะฒะตัะฐ. ะะพะฟัะพะฑัะนัะต ะตัั ัะฐะท ะฟะพะทะถะต."
noShowFieldSubscribe = 454
noShowFieldCommentAndMessage = 776
)
type dataToSend struct {
Message string `json:"message"`
NewLike int `json:"likes"`
}
var tpl *template.Template
var fm = template.FuncMap{
"truncate": truncate,
"incline": commentIncline,
}
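// truncate cuts a post preview at the first space after the first 150 runes,
// so previews end on a word boundary. The byte lookup s[index] is safe here
// only because the delimiter being tested (space, code 32) is a single-byte rune.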
func truncate(s string) string {
var numRunes = 0
for index := range s {
numRunes++
k := rune(s[index])
if (numRunes > 150) && (k == 32) {
return s[:index]
}
}
return s
}
func commentIncline(cnt int) string {
var s string
if (cnt == 1) || (cnt > 20 && cnt%10 == 1) {
s = "ะะพะผะผะตะฝัะฐัะธะน"
} else if (cnt >= 2 && cnt <= 4) || (cnt > 20 && cnt%10 >= 2 && cnt%10 <= 4) {
s = "ะะพะผะผะตะฝัะฐัะธั"
} else {
s = "ะะพะผะผะตะฝัะฐัะธะตะฒ"
}
s = strconv.Itoa(cnt) + " " + s
return s
}
type message struct {
name string
email string
content string
}
var d = gomail.NewDialer("smtp.mail.ru", 465, config.SMTPEmail, config.SMTPPassword)
func init() {
tpl = template.Must(template.New("").Funcs(fm).ParseGlob("templates/*.gohtml"))
}
func index(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {
posts, err := models.AllPosts()
if err != nil {
http.Error(w, errors.Wrap(err, "find all posts").Error(), http.StatusInternalServerError)
return
}
if err := tpl.ExecuteTemplate(w, "index.gohtml", posts); err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
log.Println(errors.Wrap(err, "execute template index"))
}
}
func show(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
id := ps.ByName("id")
if id == "" {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
post, err := models.OnePost(id)
if err != nil {
http.NotFound(w, r)
return
}
if err := tpl.ExecuteTemplate(w, "show.gohtml", post); err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
log.Println(errors.Wrap(err, "execute template show"))
}
}
func about(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
if err := tpl.ExecuteTemplate(w, "about.gohtml", nil); err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
log.Println(errors.Wrap(err, "execute template about"))
}
}
func contact(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {
if err := tpl.ExecuteTemplate(w, "contact.gohtml", nil); err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
log.Println(errors.Wrap(err, "execute template contact"))
}
}
func sendMessage(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {
	if err := req.ParseForm(); err != nil {
		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}
xcode3, err := strconv.Atoi(req.FormValue("xcode3"))
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
if xcode3 != noShowFieldCommentAndMessage {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
msg := &message{
name: req.FormValue("name"),
email: req.FormValue("email"),
content: req.FormValue("message"),
}
messageToEmail := fmt.Sprintf("<b>ะกะพะพะฑัะตะฝะธะต</b>: %s \n <b>ะั</b>: %s, %s", msg.content, msg.email, msg.name)
if err := sendMessageToEmail("ะะปะพะณ/ะบะพะฝัะฐะบัะฝะฐั ัะพัะผะฐ", messageToEmail); err != nil {
log.Println(errors.Wrap(err, "send new message to email"))
_, _ = fmt.Fprint(w, ServerErrorMessage)
return
}
	_, _ = fmt.Fprint(w, "ะะฐัะต ัะพะพะฑัะตะฝะธะต ััะฟะตัะฝะพ ะพัะฟัะฐะฒะปะตะฝะพ!")
}
func subscribe(w http.ResponseWriter, req *http.Request, _ httprouter.Params) {
email := models.Email{
EmailAddress: req.FormValue("email"),
}
noshow, err := strconv.Atoi(req.FormValue("noshow"))
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
if email.EmailAddress == "" {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
if noshow != noShowFieldSubscribe {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
re := recaptcha.R{
Secret: config.ReCaptchaSecretCode,
}
recaptchaResp := req.FormValue("g-recaptcha-response")
if !re.VerifyResponse(recaptchaResp) {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
err = models.CreateEmail(email)
if err != nil && mgo.IsDup(errors.Cause(err)) {
		_, _ = fmt.Fprint(w, "ะั ัะถะต ะฑัะปะธ ะฟะพะดะฟะธัะฐะฝั ะฝะฐ ะพะฑะฝะพะฒะปะตะฝะธั ะฑะปะพะณะฐ!")
return
}
if err != nil {
log.Println(err)
_, _ = fmt.Fprint(w, ServerErrorMessage)
return
}
	_, _ = fmt.Fprint(w, "ะั ััะฟะตัะฝะพ ะฟะพะดะฟะธัะฐะฝั ะฝะฐ ะพะฑะฝะพะฒะปะตะฝะธั ะฑะปะพะณะฐ!")
	messageToEmail := fmt.Sprintf("ะะพะฟัะธะฒะตัััะฒัะนัะต ะฝะพะฒะพะณะพ ะฟะพะดะฟะธััะธะบะฐ: %s.", email.EmailAddress)
	if err := sendMessageToEmail("ะะปะพะณ/ะฝะพะฒัะน ะฟะพะดะฟะธััะธะบ", messageToEmail); err != nil {
log.Println(errors.Wrap(err, "send new subscriber to email"))
}
}
func category(w http.ResponseWriter, _ *http.Request, ps httprouter.Params) {
category := ps.ByName("category")
if category == "" {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
posts, err := models.PostsByCategory(category)
if err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
if err := tpl.ExecuteTemplate(w, "category.gohtml", posts); err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
log.Println(errors.Wrap(err, "execute template category"))
}
}
func comment(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
id := ps.ByName("id")
if id == "" {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
xcode2, err := strconv.Atoi(req.FormValue("xcode2"))
if err != nil {
log.Println(err)
}
if xcode2 != noShowFieldCommentAndMessage {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
if err := req.ParseForm(); err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
comment := models.Comment{}
decoder := schema.NewDecoder()
decoder.IgnoreUnknownKeys(true)
err = decoder.Decode(&comment, req.PostForm)
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
// validate form values
if comment.Email == "" || comment.Author == "" || comment.Content == "" {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
post, err := models.CreateComment(comment, id)
if err != nil {
_, _ = fmt.Fprint(w, ServerErrorMessage)
log.Println(err)
return
}
	_, _ = fmt.Fprint(w, "ะะฐั ะบะพะผะผะตะฝัะฐัะธะน ััะฟะตัะฝะพ ะทะฐะฟะธัะฐะฝ ะธ ะฟัะพัะพะดะธั ะผะพะดะตัะฐัะธั!")
	messageToEmail := constructMessageToEmail(post.Name, comment.Author, comment.Content)
	if err := sendMessageToEmail("ะะปะพะณ/ะฝะพะฒัะน ะบะพะผะผะตะฝัะฐัะธะน", messageToEmail); err != nil {
log.Println(errors.Wrap(err, "send comment to email"))
}
}
func like(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
id := ps.ByName("id")
if id == "" {
		http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
	post, err := models.OnePost(id)
	if err != nil {
		http.NotFound(w, req)
		return
	}
_, err = req.Cookie(ps.ByName("id"))
if err != nil {
http.SetCookie(w, &http.Cookie{
Name: ps.ByName("id"),
Value: "1",
})
newLike, err := models.PostLike(post)
if err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
log.Println(err)
return
}
sendData := dataToSend{
Message: "ะกะฟะฐัะธะฑะพ! ะะฐัะต ะผะฝะตะฝะธะต ัััะตะฝะพ!",
NewLike: newLike,
}
jsonSendData, _ := json.Marshal(sendData)
_, _ = fmt.Fprint(w, string(jsonSendData))
return
}
sendData := dataToSend{
Message: "ะะฐัะต ะผะฝะตะฝะธะต ัะถะต ะฑัะปะพ ัััะตะฝะพ! ะกะฟะฐัะธะฑะพ!",
NewLike: post.Likes,
}
jsonSendData, _ := json.Marshal(sendData)
_, _ = fmt.Fprint(w, string(jsonSendData))
}
func sendMessageToEmail(subject, message string) error {
m := gomail.NewMessage()
m.SetHeader("From", config.SMTPEmail)
m.SetHeader("To", "[email protected]")
	m.SetAddressHeader("reply-to", config.SMTPEmail, "ะะฐัะธั")
m.SetHeader("Subject", subject)
m.SetBody("text/html", message)
if err := d.DialAndSend(m); err != nil {
return err
}
return nil
}
func constructMessageToEmail(name, author, content string) string {
	return fmt.Sprintf(
		"ะะพัั <b>%s</b> ะฑัะป ะฟัะพะบะพะผะผะตะฝัะธัะพะฒะฐะฝ ะฟะพะปัะทะพะฒะฐัะตะปะตะผ <b>%s</b>: %s.<br> ะะตะพะฑัะพะดะธะผะฐ ะผะพะดะตัะฐัะธั.",
		name, author, content,
	)
}
|
---
layout: post
title: "[learning javascript] chapter 12. ์ดํฐ๋ ์ดํฐ์ ์ ๋๋ ์ดํฐ"
description: " "
date: 2021-06-18
tags: [javascript]
comments: true
share: true
---
# Iterators and Generators
- Conceptually similar to a bookmark, in that it helps you keep track of "where you are now"
- An array is a good example of an iterable object
```javascript
const book = [
"Twinkle. twinkle, little bat!",
"How I wonder what you're at!",
"Up above the world you fly,",
"Like a tea tray in the sky.",
"Twinkle, twinkle, little bat!",
"How I wonder what you're at!"
];
```
- You can create an iterator over this array with its values method
```javascript
const it = book.values();
```
- To start using the iterator, call its next method
- The object that next returns has a `value` property and a `done` property
```javascript
it.next(); // { value: "Twinkle. twinkle, little bat!", done: false }
it.next(); // { value: "How I wonder what you're at!", done: false }
it.next(); // { value: "Up above the world you fly,", done: false }
it.next(); // { value: "Like a tea tray in the sky.", done: false }
it.next(); // { value: "Twinkle, twinkle, little bat!", done: false }
it.next(); // { value: "How I wonder what you're at!", done: false }
it.next(); // { value: undefined, done: true}
it.next(); // { value: undefined, done: true}
it.next(); // { value: undefined, done: true}
```
- Once there is nothing left, `value` becomes `undefined`, but you can keep calling `next`
- Once an iterator has run to the end, it cannot go back and provide different data
- If all you need is to enumerate the elements of an array, a for loop or for...of will do
- Pairing an iterator with a `while` loop reproduces the behavior of `for...of`
```javascript
const it = book.values();
let current = it.next();
while(!current.done) {
console.log(current.value);
current = it.next();
}
```
- All iterators are independent
- Each new iterator starts from the beginning
- You can use several iterators at the same time, each pointing at a different element
```javascript
const it1 = book.values();
const it2 = book.values();
// read two pages with it1
it1.next(); // { value: "Twinkle. twinkle, little bat!", done: false }
it1.next(); // { value: "How I wonder what you're at!", done: false }
// read one page with it2
it2.next(); // { value: "Twinkle. twinkle, little bat!", done: false }
// read one more page with it1
it1.next(); // { value: "Up above the world you fly,", done: false }
```
## The Iteration Protocol
- Iterators matter less in themselves than in the more useful behavior they make possible
- The iteration protocol can turn any object into an iterable
- Consider a log class that attaches timestamps to messages
```javascript
class Log {
constructor() {
this.messages = [];
}
add(message) {
this.messages.push({ message, timestamp: Date.now() });
}
}
```
- What if you want to iterate over the logged entries?
- You could reach into `log.messages`, but it would be nicer to treat log like an array
- The iteration protocol says that if a class has a method with the symbol key `Symbol.iterator`, and that method returns an object that behaves like an iterator, then instances of that class are iterable
- Add a `Symbol.iterator` method to the `Log` class
```javascript
class Log {
constructor() {
this.messages = [];
}
add(message) {
this.messages.push({ message, timestamp: Date.now() });
}
[Symbol.iterator]() {
return this.messages.values();
}
}
```
- Now a `Log` instance can be iterated over like an array
```javascript
const log = new Log();
log.add("first day at sea");
log.add("spotted whale");
log.add("spotted another vessel");
// iterate over the log like an array
for(let entry of log) {
console.log(`${entry.message} @ ${entry.timestamp}`);
}
```
- You can also write the iterator yourself
```javascript
class Log {
constructor() {
this.messages = [];
}
add(message) {
this.messages.push({ message, timestamp: Date.now() });
}
[Symbol.iterator]() {
let i = 0;
const messages = this.messages;
return {
next() {
if(i >= messages.length)
return { value: undefined, done: true };
return { value: messages[i++], done: false };
}
};
}
}
```
- This even works with infinite data such as the Fibonacci sequence
- The iterator simply never returns `true` for `done`
```javascript
class FibonacciSequence {
[Symbol.iterator]() {
let a = 0, b = 1;
return {
next() {
let rval = { value: b, done: false };
b += a;
a = rval.value;
return rval;
}
};
}
}
```
- Computing `FibonacciSequence` with `for...of` would run into an infinite loop, so use a `break` statement
```javascript
const fib = new FibonacciSequence();
let i = 0;
for(let n of fib) {
console.log(n);
if(++i > 9) break;
}
```
## Generators
- A generator is a function that uses an iterator to control its own execution
- An ordinary function takes parameters and returns a value; the caller has no way to control its execution beyond the parameters
- Calling an ordinary function hands control over until it finishes; generators work differently
- Generators introduce two new concepts: controlling a function's execution by splitting it into discrete steps, and communicating with the function while it runs
- A generator is like an ordinary function, with two exceptions
- A generator can yield control back to the caller at any time
- A generator is not executed immediately when called; instead it returns an iterator, and the function runs as you call the iterator's `next` method
- An example generator that yields the colors of the rainbow
```javascript
function* rainbow() { // the * marks this as a generator
yield 'red';
yield 'orange';
yield 'yellow';
yield 'green';
yield 'blue';
yield 'indigo';
yield 'violet';
}
```
- Calling the generator gives you an iterator
```javascript
const it = rainbow();
it.next(); // { value: "red", done: false }
it.next(); // { value: "orange", done: false }
it.next(); // { value: "yellow", done: false }
it.next(); // { value: "green", done: false }
it.next(); // { value: "blue", done: false }
it.next(); // { value: "indigo", done: false }
it.next(); // { value: "violet", done: false }
it.next(); // { value: undefined, done: true }
```
#### yield expressions and two-way communication
- Communication with the caller happens through `yield` expressions
- Expressions evaluate to a value, and since `yield` is an expression, it must evaluate to some value
- The value of a `yield` expression is the argument the caller passes when calling `next` on the generator's iterator
```javascript
function* interrogate() {
const name = yield "What is your name?";
const color = yield "What is your favorite color?";
return `${name}'s favorite color is ${color}.`;
}
```
- When `next` is called, the generator runs until its first line, which contains a `yield` expression, so the generator must hand control back to the caller
- For the generator's first line to resolve, the caller must call `next` again
- Then `name` receives the value passed to `next`
```javascript
const it = interrogate();
it.next(); // { value: "What is your name?", done: false }
it.next('Ethan'); // { value: "What is your favorite color?", done: false }
it.next('orange'); // { value: "Ethan's favorite color is orange.", done: true }
```
- Generators cannot be written with arrow notation; you must use `function*`
#### Generators and return
- A `yield` statement does not end a generator, even when it is the last statement
- A `return` statement anywhere in a generator sets `done` to `true`, with the `value` property set to whatever `return` returned
```javascript
function* abc() {
yield 'a';
yield 'b';
return 'c';
}
const it = abc();
it.next(); // { value: 'a', done: false }
it.next(); // { value: 'b', done: false }
it.next(); // { value: 'c', done: true }
```
- When consuming a generator, you usually ignore the `value` property once `done` is `true`
- For example, using this generator in `for...of` will never print `c` (see the sketch after this list)
- Don't try to return an important value from a generator with `return`
- Use `yield` for values the generator provides; use `return` only to stop the generator early
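
A minimal sketch of that pitfall, reusing the `abc` generator from above:

```javascript
// 'c' is abc's return value, so it arrives with done: true,
// and for...of discards it
for(let letter of abc()) {
  console.log(letter); // prints 'a' then 'b' -- never 'c'
}
```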
|
# Tests
The tests directory contains integration tests that require a pre-seeded
database.
Each tests/<group> directory contains a seed.txt file at tests/<group>/seed.txt, which is
used by the docker seeddb container to set up the test database.
The tests can be run using tests/<group>/run.sh.
## Search Tests
The tests/search/scripts directory contains test scripts \*.txt that are run
using tests/search/run.sh.
These scripts follow the format:
```
Test name A
search query A
Expected result A1
Expected result A2
Test name B
search query B
Expected result B1
Expected result B2
...
```
Each test set is separated by a newline. Comments (lines that begin with a #)
are ignored.
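
For instance, a script entry might look like this (the package paths here are
hypothetical, not part of the real seed data):

```
Test that foo is the top result for its own name
foo
github.com/hypothetical/foo
github.com/hypothetical/foo/bar
```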
The imported by counts for a dataset can also be updated in
search/importedby.txt. The file has the format:
```
<package-path>, <imported-by-count>
```
It is expected that the modules for these packages are in
tests/search/seed.txt.
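
For example (a hypothetical package path and count):

```
github.com/hypothetical/foo, 42
```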
## Symbol History API Tests
The tests/api/scripts directory contains tests that are run
using tests/api/run.sh.
The tests compare API data for a module in `tests/api/testdata` with the API
history on the frontend versions page. The frontend that is used for comparison
must have `GO_DISCOVERY_SERVE_STATS` set so that the
`/<path>?tab=versions&m=json` endpoint is available.
### Adding a package
Data in `tests/api/testdata` is generated by running:
```
go run tests/api/main.go generate [module path]:[package path suffix]
```
For example, `go run tests/api/main.go generate github.com/julieqiu/api-demo:pkg` for package
`github.com/julieqiu/api-demo/pkg`.
This data can now be used to check the API history on a frontend by running:
```
go run tests/api/main.go compare [module path]:[package path suffix]
```
## End-to-End (E2E) Tests
The e2e/ directory contains end-to-end tests for pages on pkg.go.dev, which can
be run using `./tests/e2e/run.sh`.
### Running E2E Tests
In order to run the tests, run this command from the root of the repository:
```
./tests/e2e/run.sh
```
To run a set of tests with a custom experiment set active, create a directory
with a config.yaml file, and a seed.txt file if a custom set of seed modules
is desired. Then run `./tests/e2e/run.sh <directory>`.
`./tests/e2e/run.sh` sets up a series of docker containers that run a postgres
database, frontend, and headless chrome, and runs the e2e tests using headless
chrome.
Alternatively, you can run the tests against a website that is already running.
First run headless chrome:
docker run --rm -e "CONNECTION_TIMEOUT=-1" -p 3000:3000 browserless/chrome:1.46-chrome-stable
Then run the tests from the root of pkgsite:
./all.bash npx jest [files]
`PKGSITE_URL` can be https://pkg.go.dev, or http://localhost:8080 if you have a
local instance of the frontend running.
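
For example, assuming the tests read `PKGSITE_URL` from the environment:

```
PKGSITE_URL=http://localhost:8080 ./all.bash npx jest [files]
```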
### Understanding Test Failures
If the tests fail, diffs will be created that show the cause of the failure.
Timeouts and diff thresholds are configurable for image snapshots if
adjustments are needed to prevent test flakiness. See the
[API](https://github.com/americanexpress/jest-image-snapshot#%EF%B8%8F-api) for
jest image snapshots for more information.
### Writing E2E Tests
Tests are written in the Jest framework using Puppeteer to drive a headless
instance of Chrome.
Familiarize yourself with the
[Page](https://pptr.dev/#?product=Puppeteer&version=v5.5.0&show=api-class-page)
class from the Puppeteer documentation. You'll find methods on this class that
let you interact with the page.
Most tests will follow a similar structure but for details on the Jest
framework and the various hooks and assertions see the
[API](https://jestjs.io/docs/en/api).
|
;;;; special-forms.lisp
;;;;
;;;; Copyright 2017 Alexander Gutev
;;;;
;;;; Permission is hereby granted, free of charge, to any person
;;;; obtaining a copy of this software and associated documentation
;;;; files (the "Software"), to deal in the Software without
;;;; restriction, including without limitation the rights to use,
;;;; copy, modify, merge, publish, distribute, sublicense, and/or sell
;;;; copies of the Software, and to permit persons to whom the
;;;; Software is furnished to do so, subject to the following
;;;; conditions:
;;;;
;;;; The above copyright notice and this permission notice shall be
;;;; included in all copies or substantial portions of the Software.
;;;;
;;;; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
;;;; EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
;;;; OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
;;;; NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
;;;; HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
;;;; WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
;;;; FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
;;;; OTHER DEALINGS IN THE SOFTWARE.
;;;; Code-walkers for the standard Common Lisp forms, which are not
;;;; macros, excluding the lexical binding forms (LET, FLET, etc)
;;;; which are implemented in let-forms.lisp.
(in-package :cl-environments.cltl2)
;;; Standard Common Lisp Special Forms
;;; Generic walker for special-forms which evaluate all arguments.
(eval-when (:compile-toplevel :load-toplevel :execute)
(defun walk-all-args (op args)
"Walks the arguments as ordinary lisp forms."
(cons op
(check-list args
(enclose-forms args))))
(set-walker-functions
'(cl:catch
cl:throw
cl:if
cl:multiple-value-call
cl:multiple-value-prog1
cl:progn
cl:progv
cl:unwind-protect)
#'walk-all-args))
;;; BLOCK
(defwalker cl:block (args)
"Walks the body of the BLOCK form (excluding the block name symbol)"
(match-form (name &rest forms) args
`(,name ,@(enclose-forms forms))))
(defwalker cl:return-from (args)
"Walks the result-form of the RETURN-FROM form."
(match-form (name form) args
`(,name ,(enclose-form form))))
;;; EVAL-WHEN
(defwalker cl:eval-when (args)
"Walks the body of the EVAL-WHEN form."
(match-form (situation &rest forms) args
(cons situation (enclose-forms forms))))
;;; FUNCTION
(defwalker cl:function (args)
"If the body of the FUNCTION form is a lambda expression, it is
walked as a function definition. Otherwise the form arguments are
returned as is."
(match args
((list (list* 'cl:lambda expr))
(list (cons 'cl:lambda (walk-fn-def expr (get-environment *env*)))))
#+clisp
((list name (and (list* 'cl:lambda _) expr))
(list name (second (walk-list-form 'function (list expr)))))
#+ecl
((list (list* 'ext:lambda-block name expr))
(list (list* 'ext:lambda-block name (walk-fn-def expr (get-environment *env*)))))
#+abcl
((list (list* 'system:named-lambda name expr))
(list (list* 'system:named-lambda name (walk-fn-def expr (get-environment *env*)))))
(_ args)))
;;; LOAD-TIME-VALUE
(defwalker cl:load-time-value (args)
"Walks the value form in the global NIL environment."
(match-form (form &optional read-only-p) args
`(,(enclose-in-env *global-environment* (list form)) ,read-only-p)))
;;; LOCALLY
(defwalker cl:locally (args)
"Encloses the body of the LOCALLY form in an environment, augmented
with the declaration information."
(let ((ext-env (copy-environment (get-environment *env*))))
(walk-body args ext-env)))
;;; QUOTE
(defwalker cl:quote (args)
"Returns the arguments unchanged."
args)
;;; SETQ
(defwalker cl:setq (args)
"Walks the value forms."
(check-list args
(loop for (var form) on args by #'next-2
nconc (list var (enclose-form form)))))
;;; TAGBODY
(defwalker cl:tagbody (args)
"Walks the body forms (excluding the tags)."
(flet ((walk-form (form)
(if (atom form)
form
(enclose-form form))))
(check-list args
(mapcar #'walk-form args))))
(defwalker cl:go (args)
"Returns the argument as is."
args)
;;; THE
(defwalker cl:the (args)
"Walks the value form."
(match-form (type form) args
`(,type ,(enclose-form form))))
;;; Clisp specific special forms
#+clisp
(defwalker system::function-macro-let (args)
"Encloses the body of the form in an environment augmented with the
lexical functions introduced by the form. The bodies of the
functions are not walked as this form is only used internally by
Clisp's implementation of CLOS."
(match-form ((&rest fns) . body) args
(let ((ext-env (copy-environment (get-environment *env*))))
(loop for (fn) in fns do (add-function fn ext-env))
`(,fns ,@(walk-body body ext-env)))))
;;; CCL specific special forms
#+ccl
(defwalker ccl::nfunction (args)
(match-form (name ('cl:lambda . _)) args
(list name (second (walk-list-form 'function (cdr args))))))
#+ccl
(defwalker ccl::compiler-let (args)
(match-form (bindings . body) args
(cons bindings (enclose-forms body))))
;;; ECL specific special forms
#+ecl
(defwalker multiple-value-bind (args)
"ECL has a buggy macroexpansion for MULTIPLE-VALUE-BIND which
results in an error at runtime if more/less values are returned
than expected."
(match-form ((&rest vars) form . body) args
(let ((env (copy-environment (get-environment *env*))))
(mapc (rcurry #'add-variable env) vars)
`(,vars ,(enclose-form form) ,@(walk-body body env nil)))))
|
# laravel ec sample
## Install
```
cd laradock-ec-sample
docker-compose up --build -d nginx php-fpm postgres redis workspace
docker-compose exec --user=laradock workspace bash
```
```
composer create-project --prefer-dist laravel/laravel ./
```
```
sh .sh/setup.sh
```
## Database
```
psql -d default -U default -h postgres
```
## Xdebug .vscode/launch.json Example
```
{
"version": "0.2.0",
"configurations": [
{
"name": "Listen for XDebug",
"type": "php",
"request": "launch",
"port": 9000,
"pathMappings": {
"/var/www":"${workspaceRoot}/src"
},
"ignore": [
"**/vendor/**/*.php"
]
},
{
"name": "Launch currently open script",
"type": "php",
"request": "launch",
"program": "${file}",
"cwd": "${fileDirname}",
"port": 9000
}
]
}
```
## See Also
- [Running browserSync from the Laradock workspace](https://qiita.com/hbsnow/items/c95e5a0671dabb7b6344)
|
This directory contains scripts used to implement the "Gaussian mean
estimation" simulations in the SMASH paper. In these simulations, we
assess the ability of different signal denoising methods to recover
the true signal after being provided with Gaussian-distributed
observations of the signal. We consider scenarios in which the data
have homoskedastic errors (constant variance) and heteroskedastic
errors (non-constant variance).
To run these simulations, please follow the instructions below. These
instructions assume that you have R and MATLAB installed on your
computer.
File dscr.RData in the "output" directory of this repository contains
previously generated results.
## Instructions
1. Download or clone the [git repository][smash-github] on your computer.
2. Install the following R packages: [AlgDesign][algdesign],
[wavethresh][wavethresh], [EbayesThresh][EbayesThresh],
   [dscr][dscr] and [smashr][smashr].
3. Run the `InstallMEX.m` script in the Wavelab850 subdirectory to
build the MEX files from their C source.
4. Run the R script [run_dsc.R](run_dsc.R) from the "dsc" directory of
the git repository. This can be done in batch mode (e.g., using
Rscript), or interactively in R or RStudio. When running
interactively, make sure your working directory is the "dsc"
directory of this git repository. Modify the script parameters as
needed.
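
   For example, to run the script in batch mode from the "dsc" directory:

       Rscript run_dsc.R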
5. Upon completion, the simulation results will be saved to file
`dscr.RData` in the "dsc" directory.
[smash-github]: https://github.com/stephenslab/smash-paper
[smashr]: https://github.com/stephenslab/smashr
[dscr]: https://github.com/stephens999/dscr
[ebayesthresh]: https://github.com/stephenslab/EbayesThresh
[wavethresh]: https://cran.r-project.org/package=wavethresh
[algdesign]: https://cran.r-project.org/package=AlgDesign
|
/*
* Copyright (c) 2008, 2009, 2010, 2011 Denis Tulskiy
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* version 3 along with this work. If not, see <http://www.gnu.org/licenses/>.
*/
package com.tulskiy.musique.plugins.discogs;
import java.awt.*;
import java.awt.event.ActionEvent;
import java.awt.event.ActionListener;
import java.io.File;
import java.util.ArrayList;
import javax.swing.*;
import com.tulskiy.musique.gui.menu.Menu;
import com.tulskiy.musique.playlist.Playlist;
import com.tulskiy.musique.playlist.Track;
import com.tulskiy.musique.plugins.discogs.dialog.DiscogsDialog;
import com.tulskiy.musique.plugins.discogs.dialog.SettingsDialog;
import com.tulskiy.musique.spi.Plugin;
import com.tulskiy.musique.system.Application;
/**
* @author mliauchuk
*/
public class DiscogsPlugin extends Plugin {
public static final String API_KEY = "09ff0d5c2b";
public static final String DEFAULT_CACHE_ROOT_DIR = System.getProperty("java.io.tmpdir", "");
public static final String CACHE_SUB_DIR = "musique-discogs-cache" + File.separator;
public static final String CONF_PARAM_CACHE_ENABLED = "discogs.cache.enabled";
public static final String CONF_PARAM_CACHE_LOC_TYPE = "discogs.cache.location.type";
public static final String CONF_PARAM_CACHE_LOC_DIR = "discogs.cache.location.dir";
@Override
public boolean init() {
createMenu();
return true;
}
private void createMenu() {
	// TODO: think about the case when no tracks are selected but a context menu with the Discogs setting should still appear
registerMenu(MenuType.TRACKS, new Menu.MenuCallback() {
@Override
public JMenu create(final ArrayList<Track> tracks, final Playlist playlist) {
return createMenu(tracks, playlist);
}
});
}
@Override
public void shutdown() {
}
@Override
public Description getDescription() {
return new Description("Discogs", "Maksim Liauchuk", "Tag files via Discogs service (API v1.0)");
}
@Override
public boolean isConfigurable() {
return true;
}
@Override
public void configure(Window parent) {
SettingsDialog settingsDialog = new SettingsDialog(parent);
settingsDialog.setLocationRelativeTo(null);
settingsDialog.setVisible(true);
}
private JMenu createMenu(final ArrayList<Track> tracks, final Playlist playlist) {
JMenu menu = new JMenu("Discogs");
if (tracks.size() > 0) {
JMenuItem retrieve = new JMenuItem("Query");
retrieve.addActionListener(new ActionListener() {
@Override
public void actionPerformed(ActionEvent arg0) {
new DiscogsDialog(tracks, playlist).setVisible(true);
}
});
menu.add(retrieve);
}
return menu;
}
// Configuration settings
public static void setCacheEnabled(boolean b) {
Application.getInstance().getConfiguration().setBoolean(
DiscogsPlugin.CONF_PARAM_CACHE_ENABLED, b);
}
public static boolean isCacheEnabled() {
return Application.getInstance().getConfiguration().getBoolean(
DiscogsPlugin.CONF_PARAM_CACHE_ENABLED, true);
}
public static void setCacheDirType(int n) {
Application.getInstance().getConfiguration().setInt(
DiscogsPlugin.CONF_PARAM_CACHE_LOC_TYPE, n);
}
public static int getCacheDirType() {
return Application.getInstance().getConfiguration().getInt(
DiscogsPlugin.CONF_PARAM_CACHE_LOC_TYPE, 1);
}
public static void setCacheRootDir(String s) {
Application.getInstance().getConfiguration().setString(
DiscogsPlugin.CONF_PARAM_CACHE_LOC_DIR, s);
}
public static String getCacheRootDir() {
return Application.getInstance().getConfiguration().getString(
DiscogsPlugin.CONF_PARAM_CACHE_LOC_DIR, DiscogsPlugin.DEFAULT_CACHE_ROOT_DIR);
}
public static String getCacheDir() {
String cacheRoot = getCacheRootDir();
if (cacheRoot == null) {
cacheRoot = "";
}
else if (!"".equals(cacheRoot) && !cacheRoot.endsWith(File.separator)) {
cacheRoot += File.separator;
}
return cacheRoot + CACHE_SUB_DIR;
}
}
|
package net.christophschubert.kafka.clusterstate.actions;
import net.christophschubert.kafka.clusterstate.ACLEntry;
import net.christophschubert.kafka.clusterstate.ClientBundle;
import java.util.Collections;
import java.util.concurrent.ExecutionException;
public class DeleteAclAction implements Action {
private final ACLEntry entry;
public DeleteAclAction(ACLEntry entry) {
this.entry = entry;
}
@Override
public boolean runRaw(ClientBundle bundle) throws InterruptedException, ExecutionException {
bundle.adminClient.deleteAcls(Collections.singleton(entry.toAclBinding().toFilter())).all().get();
return false;
}
@Override
public String toString() {
return "DeleteAclAction{" +
"entry=" + entry +
'}';
}
}
|
#!/bin/bash
intentos=10
python /home/duque/tesisduque/actualizacion_web.py
estado=$?
# If the script does not return 0, run it again (up to $intentos attempts)
for ((n=1; n<intentos; n++))
do
    if [ $estado -ne 0 ]
    then
        sleep 5m
        python /home/duque/tesisduque/actualizacion_web.py
        estado=$?
    else
        break
    fi
done
|