hexsha
stringlengths 40
40
| size
int64 140
1.03M
| ext
stringclasses 94
values | lang
stringclasses 21
values | max_stars_repo_path
stringlengths 3
663
| max_stars_repo_name
stringlengths 4
120
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
368k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
663
| max_issues_repo_name
stringlengths 4
120
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
663
| max_forks_repo_name
stringlengths 4
135
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 140
1.03M
| avg_line_length
float64 2.32
23.1k
| max_line_length
int64 11
938k
| alphanum_fraction
float64 0.01
1
| score
float32 3
4.25
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6d3f24f7fcc18a388802cfa833f0e2e247098ecd
| 1,681 |
ts
|
TypeScript
|
src/app/services/supabase.service.ts
|
flozim/cosTask
|
09d32e3487e8132d1dac99669450f34b6ead4eda
|
[
"MIT"
] | null | null | null |
src/app/services/supabase.service.ts
|
flozim/cosTask
|
09d32e3487e8132d1dac99669450f34b6ead4eda
|
[
"MIT"
] | null | null | null |
src/app/services/supabase.service.ts
|
flozim/cosTask
|
09d32e3487e8132d1dac99669450f34b6ead4eda
|
[
"MIT"
] | null | null | null |
import { createClient, SupabaseClient } from "@supabase/supabase-js";
import { from } from "rxjs/internal/observable/from";
import { environment } from "src/environments/environment";
import { map, tap } from "rxjs/operators";
import { Observable } from "rxjs";
import { Injectable } from "@angular/core";
import { AppImg } from "../model/appImg";
@Injectable({ providedIn: 'root' })
export class SupabaseService {
  private supabase: SupabaseClient;

  constructor() {
    // NOTE(review): 'supbaseKey' (sic) must match the property name declared in
    // src/environments/environment -- the spelling looks like a typo, but
    // renaming it only here would break the lookup; fix both together.
    this.supabase = createClient(environment.supabaseUrl, environment.supbaseKey);
  }

  /**
   * Fetches the ids of all rows in the 'appImg' table.
   * @returns Observable emitting Object[] with field id:number
   */
  getAllAppImgIds(): Observable<any> {
    const query = this.supabase.from('appImg').select('id');
    // NOTE(review): 'body' is the payload property of the response object this
    // supabase-js version returns -- confirm when upgrading the client library.
    return from(query).pipe(
      map(res => res['body'])
    );
  }

  /**
   * Fetches a single image row by its primary key.
   * @param id primary key of the 'appImg' row
   * @returns Observable emitting the matching row(s)
   */
  getAppImgById(id: number): Observable<any> {
    const query = this.supabase.from('appImg').select('*').filter('id', 'eq', id);
    return from(query).pipe(
      map(res => res['body'])
    );
  }

  /**
   * Inserts a new image row, storing its tags as one comma-separated string.
   * @param tags tag strings; joined with ',' (no trailing comma)
   * @param url image payload stored in the 'data' column
   * @returns Observable emitting the inserted AppImg[]
   */
  postAppImg(tags: string[], url: string): Observable<any> {
    // Array.prototype.join produces the comma-separated list directly,
    // replacing the manual concatenate-then-strip-trailing-comma loop.
    const tagsInString: string = tags.join(',');
    const query = this.supabase.from('appImg').insert({ data: url, tags: tagsInString });
    return from(query).pipe(
      map(res => res['body'])
    );
  }
}
| 27.112903 | 93 | 0.577037 | 3 |
fa1d427f4907498834b2f85ae3fa22c683432fd1
| 7,916 |
cpp
|
C++
|
CodeSnippets/rotateMatrix.cpp
|
Teabeans/CPP_Learn
|
a767dd323d67fab5c2baffb5aa6dd3f1e6baa35a
|
[
"MIT"
] | 1 |
2019-01-31T23:42:59.000Z
|
2019-01-31T23:42:59.000Z
|
CodeSnippets/rotateMatrix.cpp
|
Teabeans/CPP_Learn
|
a767dd323d67fab5c2baffb5aa6dd3f1e6baa35a
|
[
"MIT"
] | null | null | null |
CodeSnippets/rotateMatrix.cpp
|
Teabeans/CPP_Learn
|
a767dd323d67fab5c2baffb5aa6dd3f1e6baa35a
|
[
"MIT"
] | 1 |
2020-03-04T18:09:15.000Z
|
2020-03-04T18:09:15.000Z
|
//-----------------------------------------------------------------------------|
// Authorship
//-----------------------------------------------------------------------------|
//
// Tim Lum
// [email protected]
// Created: 2018.07.15
// Modified: 2018.08.22
//
/*
1.7 - RotateMatrix() - P.91
Given an image represented by an NxN matrix, where each pixel in the image is 4
bytes, write a method to rotate the image by 90 degrees.
Can you do this in place?
//-----------------------------------------------------------------------------|
// PROBLEM SETUP AND ASSUMPTIONS
//-----------------------------------------------------------------------------|
A left rotation may be performed by calling a right rotation 3 times.
Only the right (clockwise) rotation shall be handled
The nature of the pixel is immaterial; we may handle the pixel as a 32-bit int
Since the problem specifies an N x N matrix, we address a square aspect ratio
To move a pixel
0 1 X 0 1 X
+---+---+ +---+---+ ([0][0] becomes [1][0])
0 | 1 | 2 | 0 | 4 | 1 | ([1][0] becomes [1][1])
+---+---+ +---+---+
1 | 4 | 3 | 1 | 3 | 2 |
+---+---+ +---+---+
Y Y
Output format is ambiguous, though it is implied that the data itself should be
rotated. However, displaying the "image" and its rotated result may also be
acceptable.
//-----------------------------------------------------------------------------|
// NAIVE APPROACH
//-----------------------------------------------------------------------------|
Iterate across every pixel within a quadrant and for (4) times
Identify the 4 sister-pixels
And swap them in turn
0 1 2 X [X][Y] is sister to
+---+---+---+ [XMax - X][Y] which is sister to
0 | 1 | 2 | 3 | [XMax - X][YMax - Y] which is sister to
+---+---+---+ [X][YMax-Y]
1 | 8 | 9 | 4 |
+---+---+---+
2 | 7 | 6 | 5 |
+---+---+---+
The global behavioral rule may be defined as:
The 90 degree rotational position of any pixel in a square matrix with coordinates:
X, Y
Is
(XMax - Y), X
0 1 2 X
+---+---+---+
0 | X | O | X | 0, 0 rotates 90 degrees to
+---+---+---+ 2, 0 which rotates 90 degrees to
1 | O | O | O | 2, 2 which rotates 90 degrees to
+---+---+---+ 0, 2
2 | X | O | X |
+---+---+---+
//-----------------------------------------------------------------------------|
// OPTIMIZATIONS
//-----------------------------------------------------------------------------|
1) The orientation of the image may be stored as a separate value from 0 to 3.
This may then be used to interpret the N, E, S, W orientation of the image
without modifying the image itself.
Effectively, we may interject an orientation filter which appropriately redirects
array access based upon the rotational state of the image.
This has the added benefit of functioning on non-square arrays, and also facilitates
easy addition of -90 and 180 degree rotations.
From an image editing standpoint, interpretation rather than alteration of the base
data will also better preserve image information.
//-----------------------------------------------------------------------------|
// TIME COMPLEXITY
//-----------------------------------------------------------------------------|
Any solution which modifies the original body of data may not complete faster
than in a time complexity of:
O( n^2 )
A filter solution, however, only adds a constant time alteration to the random
access lookup of the parent data. As a "rotation" is merely the toggling of a
rotation byte, the filter may complete in a time complexity of:
O( 1 )
//-----------------------------------------------------------------------------|
// PSEUDOLOGIC
//-----------------------------------------------------------------------------|
Store the image orientation as a rotation state from 0 to 3
To rotate: increment the rotation state, modulo 4
To display the image
  Select the traversal order that corresponds to the rotation state
  For each output row
    For each output column
      Print the pixel at the remapped ( col, row ) coordinates
//-----------------------------------------------------------------------------|
// CODE (C++)
//-----------------------------------------------------------------------------|
*/
// Compile with:
// $ g++ --std=c++11 01_07_RotateMatrix.cpp -o RotateMatrix
// Run with:
// $ ./RotateMatrix
#include <string>
#include <iostream>
#include <iomanip>
// Image dimensions. Deliberately non-square (3 x 7): the orientation-filter
// approach, unlike an in-place rotation, is not limited to N x N matrices.
#define WIDTH 3
#define HEIGHT 7
// Rotation control:
// 0 == Base image
// 1 == 90 degree clockwise rotation
// 2 == 180 degree rotation
// 3 == 270 degree rotation
int ROTATION = 0;
// Backing pixel data, indexed as IMAGE[ column ][ row ]. Never modified by a
// rotation; printMatrix() reinterprets the access order based on ROTATION.
int IMAGE[ WIDTH ][ HEIGHT ];
// (+) --------------------------------|
// #printMatrix( )
// ------------------------------------|
// Desc:    Renders IMAGE to stdout, choosing the traversal order that
//          corresponds to the global ROTATION state (orientation filter)
// Params:  None
// PreCons: None
// PosCons: None
// RetVal:  None
void printMatrix( ) {
   // Emits one pixel, right-aligned in a field of width 3
   auto emitPixel = []( int col, int row ) {
      std::cout << std::setw( 3 ) << IMAGE[ col ][ row ] << " ";
   };
   // Terminates an output row (double-spaced layout)
   auto endRow = []( ) {
      std::cout << std::endl << std::endl;
   };
   switch( ROTATION ) {
   case 0: // Base orientation: rows top-to-bottom, columns left-to-right
      for( int row = 0 ; row < HEIGHT ; row++ ) {
         for( int col = 0 ; col < WIDTH ; col++ ) {
            emitPixel( col, row );
         }
         endRow( );
      }
      break;
   case 1: // 90 degrees clockwise: each column printed bottom-to-top
      for( int col = 0 ; col < WIDTH ; col++ ) {
         for( int row = HEIGHT - 1 ; row >= 0 ; row-- ) {
            emitPixel( col, row );
         }
         endRow( );
      }
      break;
   case 2: // 180 degrees: both axes reversed
      for( int row = HEIGHT - 1 ; row >= 0 ; row-- ) {
         for( int col = WIDTH - 1 ; col >= 0 ; col-- ) {
            emitPixel( col, row );
         }
         endRow( );
      }
      break;
   case 3: // 270 degrees clockwise: columns right-to-left, rows top-to-bottom
      for( int col = WIDTH - 1 ; col >= 0 ; col-- ) {
         for( int row = 0 ; row < HEIGHT ; row++ ) {
            emitPixel( col, row );
         }
         endRow( );
      }
      break;
   }
}
// (+) --------------------------------|
// #rotateMatrix( )
// ------------------------------------|
// Desc:    Advances the orientation filter by 90 degrees clockwise. The pixel
//          data is untouched; only the ROTATION state (0..3) changes, so a
//          "rotation" completes in O( 1 ).
// Params:  None
// PreCons: None
// PosCons: ROTATION has advanced by one step, wrapping from 3 back to 0
// RetVal:  None
void rotateMatrix( ) {
   ROTATION = ( ROTATION == 3 ) ? 0 : ROTATION + 1;
}
//-----------------------------------------------------------------------------|
// DRIVER
//-----------------------------------------------------------------------------|
// (+) --------------------------------|
// #main( int, char* )
// ------------------------------------|
// Desc:    Code driver. Fills IMAGE with sequential values, then performs a
//          full cycle of four 90-degree rotations, printing after each one.
// Params:  int arg1 - The number of command line arguments passed in
//          char* arg2 - The content of the command line arguments
// PreCons: None
// PosCons: None
// RetVal:  int - The exit code (0 for normal, -1 for error)
int main( int argc, char* argv[ ] ) {
   std::cout << "Test of rotateMatrix( )" << std::endl;
   // Populate the image with row-major sequential values
   for( int row = 0 ; row < HEIGHT ; row++ ) {
      for( int col = 0 ; col < WIDTH ; col++ ) {
         IMAGE[ col ][ row ] = ( WIDTH * row ) + col;
      }
   }
   printMatrix( );
   // Four 90-degree turns bring the image back to its base orientation.
   // The loop replaces four identical copy-pasted rotate/print stanzas.
   for( int turn = 0 ; turn < 4 ; turn++ ) {
      std::cout << std::endl;
      std::cout << "Rotating..." << std::endl << std::endl;
      rotateMatrix( );
      printMatrix( );
   }
   return( 0 );
} // Closing main( int, char* )
// End of file 01_07_RotateMatrix.cpp
| 28.681159 | 84 | 0.457302 | 3.359375 |
057b730a2f2ed588317f3ad487c59eae9d7949b6
| 3,287 |
lua
|
Lua
|
lua/prosesitter/linter/lintreq.lua
|
dvdsk/prosesitter.nvim
|
fc8b34e6f60ef212bfe7b6e98b64b818d472f36c
|
[
"MIT"
] | 6 |
2021-11-08T23:43:47.000Z
|
2022-03-13T12:43:21.000Z
|
lua/prosesitter/linter/lintreq.lua
|
dskleingeld/prosesitter
|
fc8b34e6f60ef212bfe7b6e98b64b818d472f36c
|
[
"MIT"
] | 21 |
2021-07-09T16:32:32.000Z
|
2021-09-19T17:01:25.000Z
|
lua/prosesitter/linter/lintreq.lua
|
dvdsk/prosesitter.nvim
|
fc8b34e6f60ef212bfe7b6e98b64b818d472f36c
|
[
"MIT"
] | null | null | null |
local log = require("prosesitter/log")
local util = require("prosesitter/util")
local state = require("prosesitter/state")
-- Shorthand for the Neovim API table.
local api = vim.api
-- Extmark namespace holding the placeholder marks that anchor queued rows.
local ns = state.ns_placeholders
local M = {}
M.__index = M -- failed table lookups on the instances should fallback to the class table, to get methods
-- Construct an empty lint request.
function M.new()
	local lintreq = setmetatable({}, M)
	-- Flat array of text segments awaiting linting.
	lintreq.text = {}
	-- placeholder extmark id -> array of metadata tables,
	-- each holding: buf, id (same as the key), row_col, idx.
	lintreq.meta_by_mark = {}
	-- index into self.text (idx) -> the metadata table for that segment,
	-- holding: buf, id, row_col, idx (same as the key).
	lintreq.meta_by_idx = {}
	return lintreq
end
-- Queue a span of buffer lines. `lines` may be empty; `start_col` is
-- 1-based and only applies to the first line of the span.
function M:add_range(buf, lines, start_row, start_col)
	local col = start_col
	for offset = 1, #lines do
		self:add(buf, lines[offset], start_row - 1 + offset, col)
		col = 1 -- subsequent lines always begin at column 1
	end
end
-- Attach another text segment to an already-registered placeholder mark.
function M:append(buf, id, text, start_col)
	local idx = #self.text + 1
	local meta = { buf = buf, id = id, row_col = start_col, idx = idx }
	local list = self.meta_by_mark[id]
	list[#list + 1] = meta
	self.meta_by_idx[idx] = meta
	self.text[idx] = text
end
-- Queue one line's text. Reuses the row's placeholder extmark when one
-- exists, otherwise creates a new one.
function M:add(buf, text, row, start_col)
	local id = nil
	local marks = api.nvim_buf_get_extmarks(buf, ns, { row, 0 }, { row, 0 }, {})
	if #marks > 0 then
		id = marks[1][1] -- there can be a max of 1 placeholder per line
		-- Known mark with existing metadata: just append another segment.
		if self.meta_by_mark[id] ~= nil then
			self:append(buf, id, text, start_col)
			return
		end
		-- Mark exists but has no metadata (e.g. it was emptied by
		-- clear_lines): fall through and re-register it below.
	else
		id = api.nvim_buf_set_extmark(buf, ns, row, 0, { end_col = 0 })
	end
	local meta = { buf = buf, id = id, row_col = start_col, idx = #self.text + 1 }
	self.meta_by_mark[id] = { meta }
	self.meta_by_idx[meta.idx] = meta
	self.text[meta.idx] = text
end
-- Remove the segments referenced by `deleted_meta` from the text array and
-- the idx->meta map. Iterates high-to-low so earlier removals do not shift
-- the positions of entries still pending removal.
-- NOTE(review): table.remove on `map` shifts later entries down, but the
-- `idx` field stored inside those shifted meta tables is not updated here --
-- verify that callers rebuild or tolerate the stale idx values.
local function delete_by_idx(deleted_meta, array, map)
	for i = #deleted_meta, 1, -1 do
		local idx = deleted_meta[i].idx
		table.remove(array, idx)
		table.remove(map, idx)
	end
end
-- Drop any queued text originating from rows `start` to `stop` of buffer
-- `buf`, located via their placeholder extmarks.
function M:clear_lines(buf, start, stop)
	local marks = api.nvim_buf_get_extmarks(buf, ns, { start, 0 }, { stop, 0 }, {})
	for i = #marks, 1, -1 do
		local mark = marks[i]
		local id = mark[1]
		local deleted = self.meta_by_mark[id]
		if deleted ~= nil then
			-- Keep the mark itself but forget its queued segments.
			self.meta_by_mark[id] = {}
			delete_by_idx(deleted, self.text, self.meta_by_idx)
		end
	end
end
-- True when no text segments are queued.
function M:is_empty()
	return next(self.text) == nil
end
-- Consume the queued segments and return a request with members:
--   text:  all segments joined with single spaces
--   areas: one entry per segment, mapping its column offset in the joined
--          text back to its buffer position
-- The lintreq is reset afterwards.
function M:build()
	local req = {}
	req.text = table.concat(self.text, " ")
	req.areas = {}
	-- TODO hide check under debug flag
	-- Sanity check: no (buf, id, row_col) triple may appear twice.
	local meta_seen = {}
	for _, meta in ipairs(self.meta_by_idx) do
		local hash = table.concat({meta.buf,meta.id,meta.row_col},",")
		if meta_seen[hash] ~= nil then
			assert(false, "lintreq contains duplicates!")
		end
		meta_seen[hash] = true
	end
	-- Running column offset of each segment inside the joined text.
	local col = 0
	for i = 1, #self.text do
		local meta = self.meta_by_idx[i]
		local area = {
			col = col, -- column in text passed to linter
			row_col = meta.row_col, -- column in buffer
			row_id = meta.id, -- extmark at the start of the row
			buf_id = meta.buf,
		}
		req.areas[#req.areas + 1] = area
		col = col + #self.text[i] + 1 -- plus one for the line end
	end
	self:reset()
	return req
end
-- Return to the pristine, empty state produced by M.new().
function M:reset()
	self.text, self.meta_by_mark, self.meta_by_idx = {}, {}, {}
end
return M
| 25.091603 | 105 | 0.672041 | 3.296875 |
4576de23e37082e2dea8e638cef12f85c2f2f63b
| 5,442 |
py
|
Python
|
test/unit2/usersignup/validation_test.py
|
cdoremus/udacity-python_web_development-cs253
|
87cf5dd5d0e06ee745d3aba058d96fa46f2aeb6b
|
[
"Apache-2.0"
] | null | null | null |
test/unit2/usersignup/validation_test.py
|
cdoremus/udacity-python_web_development-cs253
|
87cf5dd5d0e06ee745d3aba058d96fa46f2aeb6b
|
[
"Apache-2.0"
] | null | null | null |
test/unit2/usersignup/validation_test.py
|
cdoremus/udacity-python_web_development-cs253
|
87cf5dd5d0e06ee745d3aba058d96fa46f2aeb6b
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Apr 25, 2012
@author: h87966
'''
import unittest
from unit2.usersignup.validation import UserSignupValidation
from unit2.usersignup.validation import VERIFICATION_MESSAGES
from unit2.usersignup.validation import VERIFICATION_MESSAGES_KEYS
from unit2.usersignup.validation import MISMATCHED_PASSWORDS_MESSAGE
class Test(unittest.TestCase):
    """Unit tests for UserSignupValidation field checks and full-form validate.

    Uses assertEqual throughout: ``assertEquals`` is a deprecated alias that
    was removed in Python 3.12.
    """

    def setUp(self):
        # Fresh validator per test; no shared state between cases.
        self.validation = UserSignupValidation()

    def tearDown(self):
        pass

    def testIsValidUsername(self):
        # '-' and '_' are allowed, '!' is rejected; 3 chars is the shortest
        # accepted value shown here, 2 is rejected.
        self.assertTrue(self.validation.is_valid_username("Crag"))
        self.assertTrue(self.validation.is_valid_username("Crag-Doremus"))
        self.assertTrue(self.validation.is_valid_username("Crag_Doremus"))
        self.assertTrue(self.validation.is_valid_username("Cra"))
        self.assertFalse(self.validation.is_valid_username("ca"))
        self.assertFalse(self.validation.is_valid_username("cat!"))
        # Length boundary: 20 chars valid, 21 invalid.
        self.assertTrue(self.validation.is_valid_username("abcdefghijklmnopqrst"))
        self.assertFalse(self.validation.is_valid_username("abcdefghijklmnopqrstu"))

    def testIsValidPassword(self):
        # Length boundary: 20 chars valid, 21 invalid.
        self.assertTrue(self.validation.is_valid_password("Craig"))
        self.assertTrue(self.validation.is_valid_password("abcdefghijklmnopqrst"))
        self.assertFalse(self.validation.is_valid_password("abcdefghijklmnopqrstu"))

    def testIsValidEmail(self):
        self.assertTrue(self.validation.is_valid_email("[email protected]"))
        self.assertTrue(self.validation.is_valid_email("[email protected]"))
        self.assertFalse(self.validation.is_valid_email("Craigfoocom"))

    def testValid(self):
        """All four fields valid: isValid True, every message empty."""
        username = "Craig"
        password = "craig1"
        verify = "craig1"
        email = "[email protected]"
        validMsgs, isValid = self.validation.validate(username, password, verify, email)
        self.assertTrue(isValid)
        self.assertEmptyMessage([VERIFICATION_MESSAGES_KEYS[0], VERIFICATION_MESSAGES_KEYS[1],
                                 VERIFICATION_MESSAGES_KEYS[2], VERIFICATION_MESSAGES_KEYS[3]], validMsgs)

    def testValid_BadUsername(self):
        """Over-long username: message for key 0 only, others empty."""
        username = "Craigasdfasdfasdfasdfadfadfs"
        password = "craig1"
        verify = "craig1"
        email = "[email protected]"
        validMsgs, isValid = self.validation.validate(username, password, verify, email)
        self.assertFalse(isValid)
        self.assertEqual(VERIFICATION_MESSAGES[VERIFICATION_MESSAGES_KEYS[0]],
                         validMsgs[VERIFICATION_MESSAGES_KEYS[0]])
        self.assertEmptyMessage([VERIFICATION_MESSAGES_KEYS[1], VERIFICATION_MESSAGES_KEYS[2],
                                 VERIFICATION_MESSAGES_KEYS[3]], validMsgs)

    def testValid_BadPassword(self):
        """Too-short password flags both the password (1) and verify (2) keys."""
        username = "Craig"
        password = "c1"
        verify = "c1"
        email = "[email protected]"
        validMsgs, isValid = self.validation.validate(username, password, verify, email)
        self.assertFalse(isValid)
        self.assertEqual(VERIFICATION_MESSAGES[VERIFICATION_MESSAGES_KEYS[1]],
                         validMsgs[VERIFICATION_MESSAGES_KEYS[1]])
        self.assertEqual(VERIFICATION_MESSAGES[VERIFICATION_MESSAGES_KEYS[2]],
                         validMsgs[VERIFICATION_MESSAGES_KEYS[2]])
        self.assertEmptyMessage([VERIFICATION_MESSAGES_KEYS[0], VERIFICATION_MESSAGES_KEYS[3]], validMsgs)

#    def testValid_BadVerifyPassword(self):
#        username = "Craig"
#        password = "c1"
#        verify = "c1"
#        email = "[email protected]"
#        validMsgs, isValid = self.validation.validate(username, password, verify, email)
#        self.assertFalse(isValid)
#        self.assertEquals(VERIFICATION_MESSAGES[VERIFICATION_MESSAGES_KEYS[2]], validMsgs[VERIFICATION_MESSAGES_KEYS[2]])
#        self.assertEmptyMessage([VERIFICATION_MESSAGES_KEYS[0],VERIFICATION_MESSAGES_KEYS[1],VERIFICATION_MESSAGES_KEYS[3]], validMsgs)

    def testValid_BadEmail(self):
        """Malformed email: message for key 3 only, others empty."""
        username = "Craig"
        password = "craig1"
        verify = "craig1"
        email = "craigfoo.com"
        validMsgs, isValid = self.validation.validate(username, password, verify, email)
        self.assertFalse(isValid)
        self.assertEqual(VERIFICATION_MESSAGES[VERIFICATION_MESSAGES_KEYS[3]],
                         validMsgs[VERIFICATION_MESSAGES_KEYS[3]])
        self.assertEmptyMessage([VERIFICATION_MESSAGES_KEYS[0], VERIFICATION_MESSAGES_KEYS[2],
                                 VERIFICATION_MESSAGES_KEYS[1]], validMsgs)

    def testValid_PasswordsDontMatch(self):
        """Mismatched passwords set the mismatch message on keys 1 and 2."""
        username = "Craig"
        password = "craig1"
        verify = "craig"
        email = "[email protected]"
        validMsgs, isValid = self.validation.validate(username, password, verify, email)
        self.assertFalse(isValid)
        self.assertEqual(MISMATCHED_PASSWORDS_MESSAGE, validMsgs[VERIFICATION_MESSAGES_KEYS[1]])
        self.assertEqual(MISMATCHED_PASSWORDS_MESSAGE, validMsgs[VERIFICATION_MESSAGES_KEYS[2]])
        self.assertEmptyMessage([VERIFICATION_MESSAGES_KEYS[0], VERIFICATION_MESSAGES_KEYS[3]], validMsgs)

    def test_is_password_and_verify_equals(self):
        self.assertTrue(self.validation.is_password_and_verify_equals("craig", "craig"))
        self.assertFalse(self.validation.is_password_and_verify_equals("craig", "craig1"))

    def assertEmptyMessage(self, key_list, messages):
        """Assert that every message keyed by key_list is the empty string."""
        for key in key_list:
            self.assertEqual('', messages[key], "Message with key " + key + " is not empty")
if __name__ == "__main__":
    # Uncomment to run a single test by name instead of the whole module:
    #import sys;sys.argv = ['', 'Test.testValidateUsername']
    unittest.main()
| 45.35 | 165 | 0.722161 | 3.015625 |
aff1c0eb8b7dc63fa464f39be82ae8945e56c68b
| 14,075 |
py
|
Python
|
tests/test_fpack_field.py
|
frankurcrazy/fpack
|
ce2369ec3018b20d79f101ed0b439fd312681472
|
[
"BSD-3-Clause"
] | 2 |
2020-08-26T14:16:39.000Z
|
2021-01-11T08:43:36.000Z
|
tests/test_fpack_field.py
|
frankurcrazy/fpack
|
ce2369ec3018b20d79f101ed0b439fd312681472
|
[
"BSD-3-Clause"
] | 17 |
2021-04-28T06:02:45.000Z
|
2022-03-29T18:05:56.000Z
|
tests/test_fpack_field.py
|
frankurcrazy/fpack
|
ce2369ec3018b20d79f101ed0b439fd312681472
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import struct
import unittest
try:
from fpack import *
except ImportError:
import os
import sys
sys.path.append(os.path.abspath(os.path.join(".", "..")))
from fpack import *
class TestField(unittest.TestCase):
    """The abstract Field base must raise NotImplementedError everywhere."""

    def test_not_implemented_methods(self):
        base = Field()
        with self.assertRaises(NotImplementedError):
            base.pack()
        with self.assertRaises(NotImplementedError):
            base.unpack(b"12345")
        with self.assertRaises(NotImplementedError):
            base.size
        with self.assertRaises(NotImplementedError):
            Field.from_bytes(b"12345")

    def test_str_representation(self):
        # An unset value renders as "None"; a set value renders via str().
        self.assertEqual(str(Field()), "None")
        self.assertEqual(str(Field(1234)), "1234")
class TestPrimitiveFieldPack(unittest.TestCase):
    """Packing of fixed-width integer fields, validated against struct."""

    def _verify_pack(self, field_cls, fmt, val, check_field_size=False):
        # The field's wire format must match the struct reference exactly.
        ref = struct.Struct(fmt)
        field = field_cls(val)
        packed = field.pack()
        if check_field_size:
            self.assertEqual(field.size, ref.size)
        self.assertEqual(len(packed), ref.size)
        self.assertEqual(packed, ref.pack(val))

    def test_uint8_pack(self):
        self._verify_pack(Uint8, "B", 255)

    def test_uint16_pack(self):
        self._verify_pack(Uint16, "!H", 65535)

    def test_uint32_pack(self):
        self._verify_pack(Uint32, "!I", 12345789)

    def test_uint64_pack(self):
        self._verify_pack(Uint64, "!Q", 2555555555, check_field_size=True)

    def test_int8_pack(self):
        self._verify_pack(Int8, "b", -128, check_field_size=True)

    def test_int16_pack(self):
        self._verify_pack(Int16, "!h", -32767, check_field_size=True)

    def test_int32_pack(self):
        self._verify_pack(Int32, "!i", -12345789, check_field_size=True)

    def test_int64_pack(self):
        self._verify_pack(Int64, "!q", -2055555555, check_field_size=True)
class TestPrimitiveFieldUnpack(unittest.TestCase):
    """Unpacking of fixed-width integer fields, validated against struct.

    Each round-trip case exercises both the instance-level unpack() and the
    from_bytes() constructor; the undersize cases must raise ValueError.
    """

    def _verify_unpack(self, field_cls, fmt, val):
        # struct-packed bytes must decode back to the original value.
        ref = struct.Struct(fmt)
        raw = ref.pack(val)
        field = field_cls()
        consumed = field.unpack(raw)
        self.assertEqual(consumed, ref.size)
        self.assertEqual(field.val, val)
        parsed, length = field_cls.from_bytes(raw)
        self.assertTrue(isinstance(parsed, field_cls))
        self.assertEqual(length, ref.size)
        self.assertEqual(parsed.val, val)

    def _verify_undersize(self, field_cls, fmt, val, keep):
        # Input truncated to its first `keep` bytes must be rejected.
        truncated = struct.Struct(fmt).pack(val)[:keep]
        with self.assertRaises(ValueError):
            field_cls().unpack(truncated)

    def test_uint8_unpack(self):
        self._verify_unpack(Uint8, "B", 255)

    def test_uint16_unpack(self):
        self._verify_unpack(Uint16, "!H", 65535)

    def test_uint32_unpack(self):
        self._verify_unpack(Uint32, "!I", 12345789)

    def test_uint64_unpack(self):
        self._verify_unpack(Uint64, "!Q", 2555555555)

    def test_int8_unpack(self):
        self._verify_unpack(Int8, "b", -128)

    def test_int16_unpack(self):
        self._verify_unpack(Int16, "!h", -32767)

    def test_int32_unpack(self):
        self._verify_unpack(Int32, "!i", -12345789)

    def test_int64_unpack(self):
        self._verify_unpack(Int64, "!q", -2055555555)

    def test_uint8_unpack_undersize(self):
        self._verify_undersize(Uint8, "B", 255, 0)

    def test_uint16_unpack_undersize(self):
        self._verify_undersize(Uint16, "!H", 65535, 0)

    def test_uint32_unpack_undersize(self):
        self._verify_undersize(Uint32, "!I", 12345789, 0)

    def test_uint64_unpack_undersize(self):
        self._verify_undersize(Uint64, "!Q", 2555555555, 0)

    def test_int8_unpack_undersize(self):
        self._verify_undersize(Int8, "b", -128, 0)

    def test_int16_unpack_undersize(self):
        self._verify_undersize(Int16, "!h", -32767, 1)

    def test_int32_unpack_undersize(self):
        self._verify_undersize(Int32, "!i", -12345789, 3)

    def test_int64_unpack_undersize(self):
        self._verify_undersize(Int64, "!q", -2055555555, 5)
class TestStringField(unittest.TestCase):
    """Length-prefixed (u16 big-endian) UTF-8 string field."""

    @staticmethod
    def _reference(text):
        # Wire format: 2-byte big-endian length, then the UTF-8 payload.
        return struct.pack("!H", len(text)) + text.encode("utf-8")

    def test_pack_string(self):
        text = "helloworld!"
        fld = String(text)
        expected = self._reference(text)
        self.assertEqual(fld.pack(), expected)
        self.assertEqual(fld.size, len(expected))

    def test_pack_string_empty(self):
        fld = String("")
        expected = self._reference("")
        self.assertEqual(fld.pack(), expected)
        self.assertEqual(fld.size, len(expected))

    def test_pack_string_none(self):
        # An unset value renders as "None" and packs as a zero length prefix.
        fld = String(None)
        packed = fld.pack()
        self.assertEqual(str(fld), "None")
        self.assertEqual(packed, b"\x00" * 2)

    def test_unpack_string(self):
        text = "helloworld!"
        raw = self._reference(text)
        unpacked, length = String.from_bytes(raw)
        self.assertEqual(unpacked.val, text)
        self.assertEqual(unpacked.size, len(raw))
        self.assertEqual(str(unpacked), f'"{text}"')

    def test_unpack_string_undersized(self):
        raw = self._reference("helloworld!")
        with self.assertRaises(ValueError):
            String.from_bytes(raw[:-1])
        with self.assertRaises(ValueError):
            String.from_bytes(raw[:1])

    def test_unpack_string_oversized(self):
        # Trailing bytes beyond the declared length are left unconsumed.
        raw = self._reference("helloworld!")
        unpacked, length = String.from_bytes(raw + b"testdata123")
        self.assertEqual(unpacked.val, "helloworld!")
        self.assertEqual(unpacked.size, len(raw))
class TestBytesField(unittest.TestCase):
    """Length-prefixed (u16 big-endian) raw-bytes field."""

    @staticmethod
    def _reference(payload):
        # Wire format: 2-byte big-endian length, then the payload verbatim.
        return struct.pack("!H", len(payload)) + payload

    def test_pack_bytes(self):
        payload = b"helloworld!"
        fld = Bytes(payload)
        expected = self._reference(payload)
        self.assertEqual(fld.pack(), expected)
        self.assertEqual(fld.size, len(expected))

    def test_pack_bytes_empty(self):
        fld = Bytes(b"")
        expected = self._reference(b"")
        self.assertEqual(fld.pack(), expected)
        self.assertEqual(fld.size, len(expected))

    def test_pack_bytes_none(self):
        # An unset value renders as "None" and packs as a zero length prefix.
        fld = Bytes(None)
        packed = fld.pack()
        self.assertEqual(str(fld), "None")
        self.assertEqual(packed, b"\x00" * 2)

    def test_unpack_bytes(self):
        payload = b"helloworld!"
        raw = self._reference(payload)
        unpacked, length = Bytes.from_bytes(raw)
        self.assertEqual(unpacked.val, payload)
        self.assertEqual(unpacked.size, len(raw))
        self.assertEqual(str(unpacked), f"{payload}")

    def test_unpack_bytes_undersized(self):
        raw = self._reference(b"helloworld!")
        with self.assertRaises(ValueError):
            Bytes.from_bytes(raw[:-1])
        with self.assertRaises(ValueError):
            Bytes.from_bytes(raw[:1])

    def test_unpack_bytes_oversized(self):
        # Trailing bytes beyond the declared length are left unconsumed.
        raw = self._reference(b"helloworld!")
        unpacked, length = Bytes.from_bytes(raw + b"testdata123")
        self.assertEqual(unpacked.val, b"helloworld!")
        self.assertEqual(unpacked.size, len(raw))
class TestArrayField(unittest.TestCase):
    """Array fields: u16 big-endian element count followed by packed elements."""

    def test_pack_string_array(self):
        array_of_string = [
            String("this"),
            String("is"),
            String("an"),
            String("array"),
            String("of"),
            String("strings."),
        ]
        StringArray = array_field_factory("StringArray", String)
        array = StringArray(array_of_string)
        item_strings = f"[{','.join(str(x) for x in array_of_string)}]"
        # Expected wire format: element count prefix, then each element packed.
        packed = b"\x00\x06"
        for s in array_of_string:
            packed += s.pack()
        self.assertEqual(len(array), len(array_of_string))
        self.assertEqual(array.size, 37)
        self.assertEqual(
            str(array),
            f"<StringArray length={len(array_of_string)} items={item_strings}>",
        )
        self.assertEqual(array.pack(), packed)

    def test_pack_string_array_incompatible_size(self):
        # A Bytes element must be rejected by a String-typed array.
        array_of_string = [
            String("this"),
            String("is"),
            String("an"),
            String("array"),
            String("of"),
            Bytes(b"strings."),
        ]
        StringArray = array_field_factory("StringArray", String)
        with self.assertRaises(TypeError):
            array = StringArray(array_of_string)
            array.pack()

    def test_unpack_string_array(self):
        StringArray = array_field_factory("StringArray", String)
        raw = b"\x00\x06\x00\x04this\x00\x02is\x00\x02an\x00\x05array\x00\x02of\x00\x08strings."
        unpacked, s = StringArray.from_bytes(raw)
        # BUG FIX: these checks used assertTrue(a, b), which treats the second
        # argument as the failure *message* and therefore could never fail.
        self.assertEqual(unpacked.size, len(raw))
        self.assertEqual(s, len(raw))
        self.assertEqual(len(unpacked), 6)

    def test_unpack_string_array_undersized(self):
        StringArray = array_field_factory("StringArray", String)
        raw = b"\x00\x06\x00\x04this\x00\x02is\x00\x02an\x00\x05array\x00\x02of\x00\x08strings."
        with self.assertRaises(ValueError):
            unpacked, s = StringArray.from_bytes(raw[:0])

    def test_unpack_string_array_incomplete(self):
        StringArray = array_field_factory("StringArray", String)
        raw = b"\x00\x06\x00\x04this\x00\x02is\x00\x02an\x00\x05array\x00\x02of\x00\x08strings."
        with self.assertRaises(ValueError):
            unpacked, s = StringArray.from_bytes(raw[:-1])
class TestFieldFactory(unittest.TestCase):
    """field_factory / array_field_factory must produce named subclasses."""

    def test_field_factory(self):
        made = field_factory("Test", Uint8)
        self.assertEqual(made.__name__, "Test")
        self.assertTrue(issubclass(made, Uint8))

    def test_array_field_factory(self):
        made = array_field_factory("TestArray", Uint8)
        self.assertEqual(made.__name__, "TestArray")
        self.assertTrue(issubclass(made, Array))
if __name__ == "__main__":
unittest.main()
| 28.783231 | 96 | 0.590764 | 3.625 |
af85339abcf9cdc6c1c0d4d49be5260fdb385720
| 5,877 |
py
|
Python
|
aos/util/trapezoid_profile.py
|
AustinSchuh/971-Robot-Code
|
99abc66fd2d899c0bdab338dc6f57dc5def9be8d
|
[
"Apache-2.0"
] | 39 |
2021-06-18T03:22:30.000Z
|
2022-03-21T15:23:43.000Z
|
aos/util/trapezoid_profile.py
|
AustinSchuh/971-Robot-Code
|
99abc66fd2d899c0bdab338dc6f57dc5def9be8d
|
[
"Apache-2.0"
] | 10 |
2021-06-18T03:22:19.000Z
|
2022-03-18T22:14:15.000Z
|
aos/util/trapezoid_profile.py
|
AustinSchuh/971-Robot-Code
|
99abc66fd2d899c0bdab338dc6f57dc5def9be8d
|
[
"Apache-2.0"
] | 4 |
2021-08-19T19:20:04.000Z
|
2022-03-08T07:33:18.000Z
|
#!/usr/bin/python3
import numpy
class TrapezoidProfile(object):
    """Computes a trapezoidal motion profile

    Attributes:
      _acceleration_time: the amount of time the robot will travel at the
          specified acceleration (s)
      _acceleration: the acceleration the robot will use to get to the target
          (unit/s^2)
      _constant_time: amount of time to travel at a constant velocity to reach
          target (s)
      _deceleration_time: amount of time to decelerate (at specified
          deceleration) to target (s)
      _deceleration: decceleration the robot needs to get to goal velocity
          (units/s^2)
      _maximum_acceleration: the maximum acceleration (units/s^2)
      _maximum_velocity: the maximum velocity (unit/s)
      _timestep: time between calls to Update (delta_time)
      _output: output array containing distance to goal and velocity
    """

    def __init__(self, delta_time):
        """Constructs a TrapezoidProfile.

        Args:
          delta_time: time between calls to Update (seconds)
        """
        self._acceleration_time = 0
        self._acceleration = 0
        self._constant_time = 0
        self._deceleration_time = 0
        self._deceleration = 0
        self._maximum_acceleration = 0
        self._maximum_velocity = 0
        self._timestep = delta_time
        # 2x1 column vector: _output[0] is position, _output[1] is velocity.
        self._output = numpy.array(numpy.zeros((2,1)))

    # Updates the state
    def Update(self, goal_position, goal_velocity):
        # Split this timestep between the accelerate / cruise / decelerate
        # phases computed by _CalculateTimes, in that order.
        self._CalculateTimes(goal_position - self._output[0], goal_velocity)
        next_timestep = self._timestep
        # We now have the amount of time we need to accelerate to follow the
        # profile, the amount of time we need to move at constant velocity
        # to follow the profile, and the amount of time we need to decelerate to
        # follow the profile. Do as much of that as we have time left in dt.
        if self._acceleration_time > next_timestep:
            self._UpdateVals(self._acceleration, next_timestep)
        else:
            self._UpdateVals(self._acceleration, self._acceleration_time)
            next_timestep -= self._acceleration_time
            if self._constant_time > next_timestep:
                self._UpdateVals(0, next_timestep)
            else:
                self._UpdateVals(0, self._constant_time)
                next_timestep -= self._constant_time;
                if self._deceleration_time > next_timestep:
                    self._UpdateVals(self._deceleration, next_timestep)
                else:
                    self._UpdateVals(self._deceleration, self._deceleration_time)
                    next_timestep -= self._deceleration_time
                    # Profile complete; coast (zero acceleration) for the rest
                    # of the timestep.
                    self._UpdateVals(0, next_timestep)
        return self._output

    # Useful for preventing windup etc.
    def MoveCurrentState(self, current):
        self._output = current

    # Useful for preventing windup etc.
    def MoveGoal(self, dx):
        self._output[0] += dx

    def SetGoal(self, x):
        self._output[0] = x

    def set_maximum_acceleration(self, maximum_acceleration):
        self._maximum_acceleration = maximum_acceleration

    def set_maximum_velocity(self, maximum_velocity):
        self._maximum_velocity = maximum_velocity

    def _UpdateVals(self, acceleration, delta_time):
        # Integrate position and velocity forward by delta_time under constant
        # acceleration (standard kinematic equations).
        self._output[0, 0] += (self._output[1, 0] * delta_time
                               + 0.5 * acceleration * delta_time * delta_time)
        self._output[1, 0] += acceleration * delta_time

    def _CalculateTimes(self, distance_to_target, goal_velocity):
        # Computes _acceleration_time/_constant_time/_deceleration_time and the
        # signed accelerations for reaching the goal.
        if distance_to_target == 0:
            self._acceleration_time = 0
            self._acceleration = 0
            self._constant_time = 0
            self._deceleration_time = 0
            self._deceleration = 0
            return
        elif distance_to_target < 0:
            # Recurse with everything inverted.
            self._output[1] *= -1
            self._CalculateTimes(-distance_to_target, -goal_velocity)
            self._output[1] *= -1
            self._acceleration *= -1
            self._deceleration *= -1
            return
        self._constant_time = 0
        self._acceleration = self._maximum_acceleration
        # v^2 = v0^2 + 2*a*d: squared velocity if we accelerated the whole way.
        maximum_acceleration_velocity = (
            distance_to_target * 2 * numpy.abs(self._acceleration)
            + self._output[1] * self._output[1])
        if maximum_acceleration_velocity > 0:
            maximum_acceleration_velocity = numpy.sqrt(maximum_acceleration_velocity)
        else:
            maximum_acceleration_velocity = -numpy.sqrt(-maximum_acceleration_velocity)
        # Since we know what we'd have to do if we kept after it to decelerate, we
        # know the sign of the acceleration.
        if maximum_acceleration_velocity > goal_velocity:
            self._deceleration = -self._maximum_acceleration
        else:
            self._deceleration = self._maximum_acceleration
        # We now know the top velocity we can get to.
        top_velocity = numpy.sqrt((distance_to_target +
                                   (self._output[1] * self._output[1]) /
                                   (2.0 * self._acceleration) +
                                   (goal_velocity * goal_velocity) /
                                   (2.0 * self._deceleration)) /
                                  (-1.0 / (2.0 * self._deceleration) +
                                   1.0 / (2.0 * self._acceleration)))
        # If it can go too fast, we now know how long we get to accelerate for and
        # how long to go at constant velocity.
        if top_velocity > self._maximum_velocity:
            self._acceleration_time = ((self._maximum_velocity - self._output[1]) /
                                       self._maximum_acceleration)
            self._constant_time = (distance_to_target +
                                   (goal_velocity * goal_velocity -
                                    self._maximum_velocity * self._maximum_velocity) /
                                   (2.0 * self._maximum_acceleration)) / self._maximum_velocity
        else:
            self._acceleration_time = (
                (top_velocity - self._output[1]) / self._acceleration)
        # Already moving faster than allowed: skip straight to deceleration.
        if self._output[1] > self._maximum_velocity:
            self._constant_time = 0
            self._acceleration_time = 0
        self._deceleration_time = (
            (goal_velocity - top_velocity) / self._deceleration)
| 37.433121 | 84 | 0.671261 | 3.8125 |
aca84deda1b0b6d77ad779a3a9a699cac1a540a9
| 1,276 |
lua
|
Lua
|
wyx/event/EntityPositionEvent.lua
|
scottcs/wyx
|
554324cf36faf28da437d4af52fe392e9507cf62
|
[
"MIT"
] | null | null | null |
wyx/event/EntityPositionEvent.lua
|
scottcs/wyx
|
554324cf36faf28da437d4af52fe392e9507cf62
|
[
"MIT"
] | null | null | null |
wyx/event/EntityPositionEvent.lua
|
scottcs/wyx
|
554324cf36faf28da437d4af52fe392e9507cf62
|
[
"MIT"
] | null | null | null |
local Class = require 'lib.hump.class'
local Event = getClass 'wyx.event.Event'
-- EntityPositionEvent
--
local EntityPositionEvent = Class{name='EntityPositionEvent',
	inherits=Event,
	-- Constructor: records an entity's move from (fromX, fromY) to (toX, toY).
	function(self, entityID, toX, toY, fromX, fromY)
		-- Accept either an ID string or an entity object exposing getID().
		if type(entityID) ~= 'string' then entityID = entityID:getID() end
		verify('string', entityID)
		verify('number', toX, toY, fromX, fromY)
		-- NOTE(review): stock Lua assert() ignores extra arguments after the
		-- message, so the %q placeholder is never substituted unless assert is
		-- a project-wide override -- confirm, or wrap in string.format().
		assert(EntityRegistry:exists(entityID),
			'EntityPositionEvent: entityID %q does not exist', entityID)
		Event.construct(self, 'Entity Position Event')
		self._debugLevel = 2
		self._entityID = entityID
		self._toX = toX
		self._toY = toY
		self._fromX = fromX
		self._fromY = fromY
	end
}
-- destructor: release all held references, then let the base Event clean up.
function EntityPositionEvent:destroy()
	self._fromY = nil
	self._fromX = nil
	self._toY = nil
	self._toX = nil
	self._entityID = nil
	Event.destroy(self)
end
-- Accessors: the moved entity's ID, its destination (x, y), and its origin (x, y).
function EntityPositionEvent:getEntity() return self._entityID end
function EntityPositionEvent:getDestination() return self._toX, self._toY end
function EntityPositionEvent:getOrigin() return self._fromX, self._fromY end
-- Debug representation, formatted through the base Event's _msg helper.
function EntityPositionEvent:__tostring()
	return self:_msg('{%08s} from: (%d,%d) to: (%d,%d)',
		self._entityID, self._fromX, self._fromY, self._toX, self._toY)
end

-- the class
return EntityPositionEvent
| 27.148936 | 77 | 0.746082 | 3.15625 |
540ce22d1dfacf7247ef098bdde4bb677e972232
| 1,083 |
lua
|
Lua
|
gatherer.lua
|
shagu/delvermd
|
946709d5b75c8df29b7ac106bb5181573c476dd9
|
[
"MIT"
] | 1 |
2021-04-17T06:14:11.000Z
|
2021-04-17T06:14:11.000Z
|
gatherer.lua
|
shagu/delvermd
|
946709d5b75c8df29b7ac106bb5181573c476dd9
|
[
"MIT"
] | null | null | null |
gatherer.lua
|
shagu/delvermd
|
946709d5b75c8df29b7ac106bb5181573c476dd9
|
[
"MIT"
] | null | null | null |
-- download gatherer images
local gatherer = {}

-- Downloads card artwork for every card in `collection` over HTTPS, caching
-- each JPEG at cache/images/<scryfall>.jpg and storing the raw bytes in
-- images[i]["stock"]. Returns the augmented `images` table.
function gatherer:Fetch(collection, images)
  local https = require("ssl.https")
  local id = 0
  for i, card in pairs(collection) do
    id = id + 1
    -- Rewind the terminal cursor (66 backspaces) to redraw the progress
    -- counter in place on the same line.
    io.write("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b")
    io.write("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b")
    io.write(" - Downloading Gatherer Artwork ("..id..")")
    io.flush()
    -- io.open without a mode is used as a cheap existence check on the cache.
    local cache = io.open("cache/images/" .. card.scryfall .. ".jpg")
    if card.multiverse and card.imgurl and not cache then
      -- Prefer the localized image when the global `fetchlang` flag is set,
      -- falling back to the default artwork URL.
      local image = fetchlang and https.request(card.imgurl_lang)
      image = image or https.request(card.imgurl)
      if image then
        images[i]["stock"] = image
        local file = io.open("cache/images/" .. card.scryfall .. ".jpg", "w")
        file:write(image)
        file:close()
      else
        print(string.format(" WARNING: No Image for '%s' (%s)", card.name, card.multiverse))
      end
    elseif cache then
      -- Already cached: just close the probe handle.
      cache:close()
    end
  end
  print()
  return images
end

return gatherer
| 29.27027 | 92 | 0.601108 | 3.0625 |
b72ba37b25e9a84db429d533e2a07e383b2cabfb
| 8,714 |
cpp
|
C++
|
dblsqd/feed.cpp
|
Mudlet/dblsqd-sdk-qt
|
80f0da767c24e9c1a89e58aac913dd86e22d4c06
|
[
"Apache-2.0"
] | 1 |
2021-06-02T06:33:52.000Z
|
2021-06-02T06:33:52.000Z
|
dblsqd/feed.cpp
|
Mudlet/dblsqd-sdk-qt
|
80f0da767c24e9c1a89e58aac913dd86e22d4c06
|
[
"Apache-2.0"
] | 2 |
2020-11-12T11:46:45.000Z
|
2021-11-17T23:53:35.000Z
|
dblsqd/feed.cpp
|
Mudlet/dblsqd-sdk-qt
|
80f0da767c24e9c1a89e58aac913dd86e22d4c06
|
[
"Apache-2.0"
] | null | null | null |
#include "dblsqd/feed.h"
namespace dblsqd {
/*!
\class Feed
* \brief The Feed class provides methods for accessing DBLSQD Feeds and downloading Releases.
*
* A Feed is a representation of an Application’s Releases.
* This class can retrieve Feeds via HTTP(S) and offers convenience methods for
*
* \section3 Loading Feeds
*
* Before a Feed can be loaded with load(), it needs to be initialized with setUrl().
*
* \section3 Downloading Updates
* This class also allows downloading updates through the downloadRelease() method.
*/
/*!
* \brief Constructs a new Feed object.
*
* \sa setUrl()
*/
Feed::Feed(QString baseUrl, QString channel, QString os, QString arch, QString type)
    : feedReply(NULL),
      downloadReply(NULL),
      downloadFile(NULL),
      redirects(0),
      _ready(false)
{
    // An empty baseUrl leaves the Feed unconfigured; setUrl() must then be
    // called manually before load().
    if (!baseUrl.isEmpty()) {
        this->setUrl(baseUrl, channel, os, arch, type);
    }
}
/*!
* \brief Sets the Feed URL.
*
* This method can be used to manually set the Feed URL.
*/
void Feed::setUrl(QUrl url) {
    // Overwrites any URL previously derived from baseUrl components.
    this->url = url;
}
/*!
* \brief Sets the Feed URL by specifying its components.
*
* The only required component is baseUrl which must be the base URL for an Application
* provided by the DBSLQD CLI Tool. It should include the full schema and does not require
* a trailing "/".
*/
void Feed::setUrl(QString baseUrl, QString channel, QString os, QString arch, QString type) {
    // Builds "<baseUrl>/<channel>/<os>/<arch>[/?t=<type>]", auto-detecting the
    // OS and architecture segments from the runtime platform when not given.
    QStringList urlParts;
    urlParts << baseUrl;
    urlParts << channel;
    if (!os.isEmpty()) {
        urlParts << os;
    } else {
        QString autoOs = QSysInfo::productType().toLower();
        if (autoOs == "windows") {
            autoOs = "win";
        } else if (autoOs == "osx" || autoOs == "macos") {
            autoOs = "mac";
        } else {
            autoOs = QSysInfo::kernelType();
        }
        urlParts << autoOs;
    }
    if (!arch.isEmpty()) {
        urlParts << arch;
    } else {
        QString autoArch = QSysInfo::buildCpuArchitecture();
        // BUG FIX: the original condition compared against "i586" twice and
        // never matched "i686"; normalize all 32-bit x86 variants to "x86".
        if (autoArch == "i386" || autoArch == "i586" || autoArch == "i686") {
            autoArch = "x86";
        }
        urlParts << autoArch;
    }
    if (!type.isEmpty()) {
        // The release type is appended as a query parameter.
        urlParts << "?t=" + type;
    }
    this->url = QUrl(urlParts.join("/"));
}
/*!
* \brief Returns the Feed URL.
*/
QUrl Feed::getUrl() {
    // Returns a copy so callers cannot mutate the stored URL.
    return QUrl(url);
}
/*!
* \brief Returns a list of all Releases in the Feed.
*
* The list is sorted in descending order by version number/release date.
* If called before ready() was emitted, an empty list is returned.
* \sa getReleases()
*/
QList<Release> Feed::getReleases() {
    // Populated by handleFeedFinished(); empty until ready() has been emitted.
    return releases;
}
/*!
* \brief Returns a list of all Releases in the Feed that are newer than the given Release.
*
* The list is sorted in descending order by version number/release date.
* If called before ready() was emitted, an empty list is returned.
* \sa getReleases()
*/
QList<Release> Feed::getUpdates(Release currentRelease) {
    // Collect every release that is strictly newer than currentRelease,
    // skipping entries with the same (case-insensitive) version string.
    QList<Release> updates;
    const QString currentVersion = currentRelease.getVersion().toLower();
    for (int i = 0; i < releases.size(); i++) {
        const Release &candidate = releases.at(i);
        bool sameVersion = (candidate.getVersion().toLower() == currentVersion);
        if (!sameVersion && currentRelease < candidate) {
            updates << candidate;
        }
    }
    return updates;
}
/*!
* \brief Returns the pointer to a QTemporaryFile for a downloaded file.
*
* If called before downloadFinished() was emitted, this might return a NULL
* pointer.
*/
QTemporaryFile* Feed::getDownloadFile() {
    // NULL until the first data of a download has arrived.
    return downloadFile;
}
/*!
* \brief Returns true if Feed information has been retrieved successfully.
*
* A ready Feed might not contain any release information.
* If downloading the Feed failed, false is returned.
*/
bool Feed::isReady() {
    // Set once handleFeedFinished() has parsed the feed successfully.
    return _ready;
}
/*
* Async API functions
*/
/*!
* \brief Retrieves and parses data from the Feed.
*
* A Feed URL must have been set before with setUrl(). Emits ready() or loadError() on completion.
*/
void Feed::load() {
    // Ignore the call while a previous feed request is still in flight.
    if (feedReply != NULL && !feedReply->isFinished()) {
        return;
    }
    QNetworkRequest request(getUrl());
    feedReply = nam.get(request);
    // handleFeedFinished() parses the reply and emits ready()/loadError().
    connect(feedReply, SIGNAL(finished()), this, SLOT(handleFeedFinished()));
}
/*!
* \brief Starts the download of a given Release.
* \sa downloadFinished() downloadError() downloadProgress()
*/
void Feed::downloadRelease(Release release) {
    // Fresh download chain: reset the HTTP redirect counter.
    redirects = 0;
    makeDownloadRequest(release.getDownloadUrl());
    // Remember the Release so handleDownloadFinished() can verify its SHA-256.
    this->release = release;
}
/*
* Private methods
*/
void Feed::makeDownloadRequest(QUrl url) {
    // Abort and discard any download already in progress; signals are
    // disconnected first so the abort does not fire our own slots.
    if (downloadReply != NULL && !downloadReply->isFinished()) {
        disconnect(downloadReply);
        downloadReply->abort();
        downloadReply->deleteLater();
    }
    // Drop any partially written temp file from a previous attempt.
    if (downloadFile != NULL) {
        disconnect(downloadFile);
        downloadFile->close();
        downloadFile->deleteLater();
        downloadFile = NULL;
    }
    QNetworkRequest request(url);
    downloadReply= nam.get(request);
    connect(downloadReply, SIGNAL(downloadProgress(qint64,qint64)), this, SLOT(handleDownloadProgress(qint64,qint64)));
    connect(downloadReply, SIGNAL(readyRead()), this, SLOT(handleDownloadReadyRead()));
    connect(downloadReply, SIGNAL(finished()), this, SLOT(handleDownloadFinished()));
}
/*
* Signals
*/
/*! \fn void Feed::ready()
* This signal is emitted when a Feed has been successfully downloaded and parsed.
* \sa loadError() load()
*/
/*! \fn void Feed::loadError(QString message)
* This signal is emitted when a Feed could not be downloaded.
* When loadError() is emitted, ready() is not emitted.
* \sa ready() load()
*/
/*! \fn void Feed::downloadProgress(qint64 bytesReceived, qint64 bytesTotal)
* This signal is emitted during the download of a Release through downloadRelease().
* \sa downloadRelease()
*/
/*! \fn void Feed::downloadFinished()
* This signal is emitted when the download of a Release was successful.
* A QTemporaryFile* of the downloaded file can then be retrieved with getDownloadFile().
* \sa downloadRelease()
*/
/*! \fn void Feed::downloadError()
* This signal is emitted when there was an error downloading or verifying a Release.
* When downloadError() is emitted, downloadFinished() is not emitted.
* \sa downloadFinished() downloadRelease()
*/
/*
* Private Slots
*/
void Feed::handleFeedFinished() {
if (feedReply->error() != QNetworkReply::NoError) {
emit loadError(feedReply->errorString());
return;
}
releases.clear();
QByteArray json = feedReply->readAll();
QJsonDocument doc = QJsonDocument::fromJson(json);
QJsonArray releasesInfo = doc.object().value("releases").toArray();
for (int i = 0; i < releasesInfo.size(); i++) {
releases << Release(releasesInfo.at(i).toObject());
}
std::sort(releases.begin(), releases.end());
std::reverse(releases.begin(), releases.end());
_ready = true;
emit ready();
}
void Feed::handleDownloadProgress(qint64 bytesReceived, qint64 bytesTotal) {
    // Re-emit the network reply's progress as this class's own signal.
    emit downloadProgress(bytesReceived, bytesTotal);
}
void Feed::handleDownloadReadyRead() {
    // Lazily create the temp file on first data, so redirect responses (which
    // carry no useful body) do not leave empty files behind.
    if (downloadFile == NULL) {
        QString fileName = downloadReply->url().fileName();
        // Insert QTemporaryFile's "-XXXXXX" placeholder before the extension
        // (handling double extensions like ".tar.gz") so the suffix survives.
        int extensionPos = fileName.indexOf(QRegExp("(?:\\.tar)?\\.[a-zA-Z0-9]+$"));
        if (extensionPos > -1) {
            fileName.insert(extensionPos, "-XXXXXX");
        }
        downloadFile = new QTemporaryFile(QDir::tempPath() + "/" + fileName);
        downloadFile->open();
    }
    downloadFile->write(downloadReply->readAll());
}
void Feed::handleDownloadFinished() {
    if (downloadReply->error() != QNetworkReply::NoError) {
        emit downloadError(downloadReply->errorString());
        return;
    } else if (!downloadReply->attribute(QNetworkRequest::RedirectionTargetAttribute).isNull()) {
        // HTTP redirect: follow it manually, capped at 8 hops to avoid loops.
        if (redirects >= 8) {
            emit downloadError(tr("Too many redirects."));
            return;
        }
        // The target may be relative; resolve it against the current URL.
        QUrl redirectionTarget = downloadReply->attribute(QNetworkRequest::RedirectionTargetAttribute).toUrl();
        QUrl redirectedUrl = downloadReply->url().resolved(redirectionTarget);
        redirects ++;
        makeDownloadRequest(redirectedUrl);
        return;
    } else if (downloadFile == NULL) {
        // readyRead() never fired, so the response had an empty body.
        emit downloadError(tr("No data received from server"));
        return;
    }
    // Verify the download against the SHA-256 published in the Release.
    downloadFile->flush();
    downloadFile->seek(0);
    QCryptographicHash fileHash(QCryptographicHash::Sha256);
    fileHash.addData(downloadFile->readAll());
    QString hashResult = fileHash.result().toHex();
    if (hashResult.toLower() != release.getDownloadSHA256().toLower())
    {
        emit downloadError(tr("Could not verify download integrity."));
        return;
    }
    emit downloadFinished();
}
} // namespace dblsqd
| 28.759076 | 119 | 0.653661 | 3.234375 |
0d7220e2a6da8d1ae15b986f80f63c31592c0aed
| 4,984 |
rb
|
Ruby
|
lib/rhouse.rb
|
derailed/rhouse
|
2c1c45077d58586a3101191f885c57aaa4361301
|
[
"Unlicense",
"MIT"
] | 10 |
2015-03-18T13:08:54.000Z
|
2021-07-31T02:29:16.000Z
|
lib/rhouse.rb
|
derailed/rhouse
|
2c1c45077d58586a3101191f885c57aaa4361301
|
[
"Unlicense",
"MIT"
] | null | null | null |
lib/rhouse.rb
|
derailed/rhouse
|
2c1c45077d58586a3101191f885c57aaa4361301
|
[
"Unlicense",
"MIT"
] | 3 |
2016-02-01T16:49:45.000Z
|
2021-07-31T02:29:45.000Z
|
# Sets up the Rhouse gem environment.
# Configures logging, database connection and various paths
module Rhouse
  # Gem version
  VERSION = '0.0.3'

  # Root path of rhouse
  PATH = ::File.expand_path(::File.join(::File.dirname(__FILE__), *%w[..]))

  # Lib path
  LIBPATH = ::File.join( PATH, "lib" )

  # Configuration path
  CONFPATH = ::File.join( PATH, "config" )

  class << self
    # Holds the rhouse configuration hash
    attr_reader :config

    # Holds the environment the gem is running under
    attr_reader :environment

    # The version string for the library.
    def version() VERSION; end

    # Helper to find file from the root path
    def path( *args )
      args.empty? ? PATH : ::File.join( PATH, args.flatten )
    end

    # Helper to locate a file in lib directory
    def libpath( *args )
      args.empty? ? LIBPATH : ::File.join( LIBPATH, args.flatten )
    end

    # Helper to locate a configuration file in the config dir
    def confpath( *args )
      args.empty? ? CONFPATH : ::File.join( CONFPATH, args.flatten )
    end

    # Initializes the gem. Sets up logging and database connections if required
    def initialize( opts={} )
      @config      = default_config.merge( opts )
      @environment = (config[:environment] || ENV['RH_ENV'] || :test).to_s
      establish_db_connection if @config[:requires_db]
      @initialized = true
    end
    public :initialize

    # For testing only !
    def reset
      @logger      = nil
      @config      = nil
      @environment = nil
      @initialized = false
    end
    public :reset

    # Is rhouse initialized
    def initialized?() @initialized; end

    # Is the gem running in production env?
    def production_env?() environment == 'production'; end

    # Is the gem running in test env?
    def test_env?() environment == 'test' ; end

    # Connects to the pluto database
    def establish_db_connection
      # BUG FIX: the require must come before the ActiveRecord::Base guard;
      # the original referenced ActiveRecord before loading it, which raised
      # NameError when active_record was not already required elsewhere.
      require 'active_record'
      return if ActiveRecord::Base.connected? || !@environment
      database = YAML.load_file( conf_path( "database.yml" ) )
      ActiveRecord::Base.colorize_logging = true
      ActiveRecord::Base.logger = logger # set up the AR logger before connecting
      logger.debug "--- Establishing database connection in '#{@environment.upcase}' environment"
      ActiveRecord::Base.establish_connection( database[@environment] )
    end

    # Helper to locate a configuration file
    def conf_path( *args )
      @conf_path ||= CONFPATH
      # BUG FIX: the original returned the undefined @confpath (nil) when
      # called without arguments.
      args.empty? ? @conf_path : File.join( @conf_path, *args )
    end

    # Sets up the default configuration env.
    # By default test env, no db connection and stdout logging
    def default_config
      {
        :environment       => :test,
        :requires_db       => false,
        :log_level         => :info,
        :log_file          => $stdout,
        :email_alert_level => :error
      }
    end

    # Helper to require all files from a given location
    def require_all_libs_relative_to( fname, dir = nil )
      dir ||= ::File.basename(fname, '.*')
      search_me = ::File.expand_path(
        ::File.join(::File.dirname(fname), dir, '**', '*.rb'))
      Dir.glob(search_me).sort.each do |rb|
        # puts "[REQ] #{rb}"
        require rb
      end
    end

    # Sets up the rhouse logger. Using the logging gem
    def logger
      return @logger if @logger
      # the logger is initialized before anything else, including the database, so include it here.
      require "logger"
      @logger = Rhouse::Logger.new( {
        :log_file          => config[:log_file],
        :log_level         => config[:log_level],
        :email_alerts_to   => config[:email_alerts_to],
        :email_alert_level => config[:email_alert_level],
        :additive          => false
      } )
    end

    # For debuging
    def dump
      logger << "-" * 22 + " RHouse configuration " + "-" * 76 + "\n"
      config.keys.sort{ |a,b| a.to_s <=> b.to_s }.each do |k|
        key   = k.to_s.rjust(20)
        value = config[k]
        if value.blank?
          logger << "#{key} : #{value.inspect.rjust(97," ")}\n" # shows an empty hashes/arrays, nils, etc.
        else
          case value
          when Hash
            logger << "#{key} : #{(value.keys.first.to_s + ": " + value[value.keys.first].inspect).rjust(97,' ')}\n"
            value.keys[1..-1].each { |k| logger << " "*23 + (k.to_s + " : " + value[k].inspect).rjust(97," ") + "\n" }
          else
            logger << "#{key} : #{value.to_s.rjust(97," ")}\n"
          end
        end
      end
      logger << "-" * 120 + "\n"
    end
  end

  require Rhouse.libpath(*%w[core_ext active_record base])
  require Rhouse.libpath(*%w[core_ext active_record connection_adapter])
  require_all_libs_relative_to( File.join( File.dirname(__FILE__), %w[rhouse] ) )
end
| 33.675676 | 120 | 0.582865 | 3.203125 |
2fd72d125f5346054e3757f9a48f62bf0af118ee
| 565 |
py
|
Python
|
ADA_KING.py
|
myid13221/CODECHEF-PYTHON
|
849532482f1ede127b299ab2d6000f27b99ee7b9
|
[
"MIT"
] | null | null | null |
ADA_KING.py
|
myid13221/CODECHEF-PYTHON
|
849532482f1ede127b299ab2d6000f27b99ee7b9
|
[
"MIT"
] | 4 |
2020-10-04T07:49:30.000Z
|
2021-10-02T05:24:40.000Z
|
ADA_KING.py
|
myid13221/CODECHEF-PYTHON
|
849532482f1ede127b299ab2d6000f27b99ee7b9
|
[
"MIT"
] | 7 |
2020-10-04T07:46:55.000Z
|
2021-11-05T14:30:00.000Z
|
# cook your dish here
try:
t = int(input())
for _ in range(t):
r, c, k = map(int, input().rstrip().split(' '))
if r <= k:
start_row = 1
else:
start_row = r-k
if c <= k:
start_col = 1
else:
start_col = c-k
if r+k >= 8:
end_row = 8
else:
end_row = r+k
if c+k >= 8:
end_col = 8
else:
end_col = c+k
print((end_row - start_row + 1)*(end_col - start_col + 1))
except:
pass
| 20.925926 | 66 | 0.39646 | 3.09375 |
3905b5a759cf16385fad603df3999be49ecaf31d
| 2,184 |
py
|
Python
|
Bank.py
|
sahgh313/Bank_project
|
9a24ff9fd4946b8dd84e77bc5f9e2e03527b2f26
|
[
"MIT"
] | null | null | null |
Bank.py
|
sahgh313/Bank_project
|
9a24ff9fd4946b8dd84e77bc5f9e2e03527b2f26
|
[
"MIT"
] | null | null | null |
Bank.py
|
sahgh313/Bank_project
|
9a24ff9fd4946b8dd84e77bc5f9e2e03527b2f26
|
[
"MIT"
] | null | null | null |
#پروژه بانک
#نوشته شده توسط امیر حسین غرقی
class Bank:
    """A single console-driven bank account: create, deposit, withdraw, show."""

    def Create(self):
        """Prompt for the customer's details and a non-negative opening balance."""
        self.first_name = input( 'Enter first name : ')
        self.last_name = input("Enter your last name : ")
        self.phone_number = input("Enter your phone number, sample : 0912*****54 :")
        self.value = float(input("Enter your start value : "))
        while self.value < 0 :
            print("First value can not be negative !")
            self.value = float(input("Enter your start value : "))

    def Add(self):
        """Prompt for a non-negative deposit and add it to the balance."""
        self.to_add = float(input("How much do you want to add? "))
        while self.to_add < 0 :
            print("Can't be negative! try again ")
            self.to_add = float(input("How much do you want to add? "))
        self.value += self.to_add
        print ("your balance is :", self.value)

    def Sub(self):
        """Prompt for a withdrawal amount and subtract it from the balance.

        Re-prompts while the amount is negative or exceeds the balance.
        """
        self.sub_from = float(input("how much do you want to take? "))
        # BUG FIX: the original used 'and', which can never be true for a
        # non-negative balance, so negative amounts and overdrafts were
        # silently accepted. Reject an amount that is negative OR too large.
        while self.sub_from < 0 or self.sub_from > self.value:
            print("Cant be negative! try again ")
            self.sub_from = float(input("how much do you want to take? "))
        self.value -= self.sub_from
        print ("your balance is :", self.value)

    def Show(self):
        """Print the stored customer details and the current balance."""
        print(self.first_name, self.last_name,"phone number", self.phone_number,"account balance", self.value)
#------main---------------------------------
# NOTE(review): this banner is printed once here and again at the top of the
# loop, so the menu appears twice before the first prompt.
print("""
Wellcome
here are your choices:)
press 1 to create an account;
press 2 to deposit to your account;
press 3 to withdraw from the account
press 4 to show your info;
press 0 to exit;
""")
customer = Bank()
# Main menu loop: dispatch on the user's numeric choice until 0 is entered.
# Any unlisted number simply re-displays the menu.
while True :
    print("""
Wellcome
here are your choices:)
press 1 to create an account;
press 2 to deposit to your account;
press 3 to withdraw from the account
press 4 to show your info;
press 0 to exit;
""")
    menu = int(input(""))
    if menu == 1:
        customer.Create()
    elif menu == 2 :
        customer.Add()
    elif menu == 3 :
        customer.Sub()
    elif menu == 4 :
        customer.Show()
    elif menu == 0 :
        break
| 24.539326 | 110 | 0.553114 | 3.5 |
364b306495d7d4936efa56daed17a1e1abbbf71d
| 4,241 |
lua
|
Lua
|
Client/Spec.lua
|
vugi99/nanos-vzombies
|
7aaf33a474cd1947c9bc12b93ebe238aefce1c7d
|
[
"Unlicense"
] | 4 |
2021-11-17T22:04:49.000Z
|
2022-03-08T17:29:32.000Z
|
Client/Spec.lua
|
vugi99/nanos-vzombies
|
7aaf33a474cd1947c9bc12b93ebe238aefce1c7d
|
[
"Unlicense"
] | null | null | null |
Client/Spec.lua
|
vugi99/nanos-vzombies
|
7aaf33a474cd1947c9bc12b93ebe238aefce1c7d
|
[
"Unlicense"
] | null | null | null |
-- Player currently spectated by the local client (nil when not spectating).
Spectating_Player = nil

-- Wrap-around fallback: among human players (not ourselves, not old_ply_id)
-- that control a character, pick the lowest ID, or the highest ID when
-- prev_ply is set. Returns nil when no such player exists.
function GetResetPlyID(old_ply_id, prev_ply)
    local selected_ply_id
    local selected_ply
    for k, v in pairs(Player.GetPairs()) do
        if not v.BOT then
            if v ~= Client.GetLocalPlayer() then
                if v:GetID() ~= old_ply_id then
                    local char = v:GetControlledCharacter()
                    if char then
                        -- Keep the extreme ID in the requested direction.
                        if (not selected_ply_id or ((v:GetID() < selected_ply_id and not prev_ply) or (v:GetID() > selected_ply_id and prev_ply))) then
                            selected_ply_id = v:GetID()
                            selected_ply = v
                        end
                    end
                end
            end
        end
    end
    return selected_ply
end
-- Finds the next spectate target: the human player with a controlled
-- character whose ID is the closest above old_ply_id (or closest below when
-- prev_ply is set). Falls back to GetResetPlyID to wrap around.
function GetNewPlayerToSpec(old_ply_id, prev_ply)
    old_ply_id = old_ply_id or 0
    local new_ply
    local new_ply_id
    for k, v in pairs(Player.GetPairs()) do
        if not v.BOT then
            if v ~= Client.GetLocalPlayer() then
                local char = v:GetControlledCharacter()
                if char then
                    -- Accept the first candidate past old_ply_id in the search
                    -- direction, then keep tightening towards old_ply_id.
                    if (((v:GetID() > old_ply_id and not new_ply_id and not prev_ply) or (v:GetID() < old_ply_id and not new_ply_id and prev_ply)) or (((v:GetID() > old_ply_id and not prev_ply) or (v:GetID() < old_ply_id and prev_ply)) and ((new_ply_id > v:GetID() and not prev_ply) or (new_ply_id < v:GetID() and prev_ply)))) then
                        new_ply = v
                        new_ply_id = v:GetID()
                    end
                end
            end
        end
    end
    if not new_ply then
        new_ply = GetResetPlyID(old_ply_id, prev_ply)
    end
    return new_ply
end
-- Returns true when `char` is the character controlled by the player we are
-- currently spectating; otherwise returns nothing (nil).
function IsSpectatingPlayerCharacter(char)
    if not Spectating_Player then return end
    if Spectating_Player:GetControlledCharacter() == char then
        return true
    end
end
-- Begins spectating `to_spec` (no-op when nil): attaches the camera, refreshes
-- the ammo HUD from the spectated character's picked item, and repaints the
-- one-time-updates canvas.
function SpectatePlayer(to_spec)
    if to_spec then
        Client.GetLocalPlayer():Spectate(to_spec)
        Spectating_Player = to_spec
        local char = Spectating_Player:GetControlledCharacter()
        local picked = char:GetPicked()
        if picked then
            NeedToUpdateAmmoText(char, picked)
        end
        One_Time_Updates_Canvas:Repaint()
    end
end
-- Ends spectating: restores the local camera, clears the target, and repaints.
function StopSpectate()
    Client.GetLocalPlayer():ResetCamera()
    Spectating_Player = nil
    One_Time_Updates_Canvas:Repaint()
end
-- When the local player gets a character, stop spectating; when another player
-- does and we have neither a character nor a target, start spectating them.
VZ_EVENT_SUBSCRIBE("Player", "Possess", function(ply, char)
    --print("Player Possess")
    if ply == Client.GetLocalPlayer() then
        StopSpectate()
    elseif (not Spectating_Player and not Client.GetLocalPlayer():GetControlledCharacter()) then
        local new_spec = GetNewPlayerToSpec()
        SpectatePlayer(new_spec)
    end
end)

-- When the local player loses their character, pick someone to spectate; when
-- the spectated player loses theirs, switch targets or stop.
VZ_EVENT_SUBSCRIBE("Player", "UnPossess", function(ply, char)
    --print("Player UnPossess", ply, char)
    if ply == Client.GetLocalPlayer() then
        local new_spec = GetNewPlayerToSpec()
        --print("new_spec, unpossess", new_spec)
        SpectatePlayer(new_spec)
    elseif ply == Spectating_Player then
        local new_spec = GetNewPlayerToSpec()
        if new_spec then
            SpectatePlayer(new_spec)
        else
            StopSpectate()
        end
    end
end)

-- If the spectated player disconnects, switch targets or stop.
VZ_EVENT_SUBSCRIBE("Player", "Destroy", function(ply)
    --print("Player Destroy")
    if ply == Spectating_Player then
        local new_spec = GetNewPlayerToSpec()
        if new_spec then
            SpectatePlayer(new_spec)
        else
            StopSpectate()
        end
    end
end)

-- Script start: if the local player has no character yet, spectate someone.
if not Client.GetLocalPlayer():GetControlledCharacter() then
    local new_spec = GetNewPlayerToSpec()
    --print("new_spec", new_spec)
    SpectatePlayer(new_spec)
end
-- Arrow keys cycle the spectate target: Left = previous ID, Right = next ID.
Input.Register("SpectatePrev", "Left")
Input.Register("SpectateNext", "Right")

VZ_BIND("SpectatePrev", InputEvent.Pressed, function()
    if Spectating_Player then
        local new_spec = GetNewPlayerToSpec(Spectating_Player:GetID(), true)
        SpectatePlayer(new_spec)
    end
end)

VZ_BIND("SpectateNext", InputEvent.Pressed, function()
    if Spectating_Player then
        local new_spec = GetNewPlayerToSpec(Spectating_Player:GetID())
        SpectatePlayer(new_spec)
    end
end)
| 30.292857 | 331 | 0.626975 | 3.109375 |
ff9bc5ab4842e7843a1b236ef9a6f7677a154cd1
| 7,505 |
py
|
Python
|
ImageAnalysisSanSalvadorCNN.py
|
falbav/Image-Analysis-San-Salvador
|
c2cb2a27ffed3b134ceb329d9358563d374bddd9
|
[
"MIT"
] | 1 |
2021-04-22T23:05:49.000Z
|
2021-04-22T23:05:49.000Z
|
ImageAnalysisSanSalvadorCNN.py
|
falbav/Image-Analysis-San-Salvador
|
c2cb2a27ffed3b134ceb329d9358563d374bddd9
|
[
"MIT"
] | null | null | null |
ImageAnalysisSanSalvadorCNN.py
|
falbav/Image-Analysis-San-Salvador
|
c2cb2a27ffed3b134ceb329d9358563d374bddd9
|
[
"MIT"
] | null | null | null |
"""
This code explores Different Models of Convolutional Neural Networks
for the San Salvador Gang Project
@author: falba and ftop
"""
import os
import google_streetview.api
import pandas as pd
import numpy as np
import sys
import matplotlib.image as mp_img
from matplotlib import pyplot as plot
from skimage import io
from skimage.color import rgb2gray
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.model_selection import train_test_split
#pip install tensorflow keras numpy skimage matplotlib
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras import layers
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.utils import to_categorical
from keras.layers import LSTM, Embedding
from keras.preprocessing.image import ImageDataGenerator
from sklearn.linear_model import LogisticRegression
from numpy import where
from keras import regularizers
import random
#### Load the data
## Set Directory
os.chdir('C:/Users/falba/Dropbox/ImageAnalysis/San Salvador/GangBoundaries')
df = pd.read_csv("C:/Users/falba/Dropbox/ImageAnalysis/San Salvador/GangBoundaries/sample.csv", header=0)
df
# Base path prepended to every image filename listed in the CSV's 'file' column.
astr = "C:/Users/falba/Dropbox/ImageAnalysis/San Salvador/GangBoundaries/"
# Let's create a sample for testing and training:
train, test = train_test_split(df, test_size=0.25, random_state=38)
# Obtaining the image data of testing: read each image, convert to grayscale.
test_cases=[]
test_class=[]
file=test.file
for x in file:
    image = io.imread(astr+x)
    image =rgb2gray(image)
    test_cases.append(image)
# NOTE(review): 579 is the hard-coded test-set size (25% of the sample) and
# 480x480 the image resolution; these break if the CSV changes -- confirm.
test_cases=np.reshape(np.ravel(test_cases),(579,480,480,-1))
# Binary labels: 1 when the location is inside gang territory (gang_territory10).
for index, Series in test.iterrows():
    test_class.append(Series["gang_territory10"])
test_class=np.reshape(test_class,(579,-1))
# The image data of training (same pipeline as the test set).
train_cases=[]
train_class=[]
fileT=train.file
for x in fileT:
    image = io.imread(astr+x)
    image=rgb2gray(image)
    train_cases.append(image)
# NOTE(review): 1735 is the hard-coded training-set size -- confirm.
train_cases=np.reshape(train_cases,(1735,480,480,-1))
for index, series in train.iterrows():
    train_class.append(series["gang_territory10"])
train_class=np.reshape(train_class,(1735,-1))
## To Categorical
#y_train = to_categorical(train_class)
#y_test= to_categorical(test_class)
input_dim = train_cases.shape[1]
maxlen = 100
### Now let's try a Convolutional Neural Networks
# Seeting up Convolution Layers and Filters
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=(480,480,1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(10, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='Adam',metrics=['accuracy'])
model.summary()
hist_1 = model.fit(train_cases,train_class,verbose=False,epochs=50,validation_data=(test_cases,test_class),batch_size=10)
hist1acc=model.evaluate(test_cases,test_class)
#accuracy: 0.6165
# plot loss during training
plot.subplot(211)
#plot.title('Loss / Binary Crossentropy')
plot.plot(hist_1.history['loss'], label='Train')
plot.plot(hist_1.history['val_loss'], label='Test')
plot.legend()
plot.show()
plot.subplot(212)
#plot.title('Accuracy / Binary Crossentropy')
plot.plot(hist_1.history['accuracy'], label='Train')
plot.plot(hist_1.history['val_accuracy'], label='Test')
plot.legend()
plot.show()
#plot.savefig('LossBinCross.png')
# Binary CrossEntropy - Model 2
model = Sequential()
model.add(Conv2D(8, (5, 5), activation='relu', input_shape=(480,480,1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(2, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
hist_2 = model.fit(train_cases,train_class,verbose=False,epochs=30,validation_data=(test_cases,test_class),batch_size=10)
# evaluate the model
hist2acc=model.evaluate(test_cases,test_class)
#Accuracy 0.5354
# plot accuracy during training
plot.subplot(212)
plot.title('Accuracy / Binary Crossentropy')
plot.plot(hist_2.history['accuracy'], label='Train')
plot.plot(hist_2.history['val_accuracy'], label='Test')
plot.legend()
plot.show()
plot.subplot(211)
plot.title('Loss / Binary Crossentropy')
plot.plot(hist_2.history['loss'], label='Train')
plot.plot(hist_2.history['val_loss'], label='Test')
plot.legend()
plot.show()
## Seems like EPOCH migh be too high. Optimal can be less than 10
## Maybe because overfitting
# Binary CrossEntropy - Model 3
model = Sequential()
model.add(Conv2D(10, (11, 11), activation='relu', input_shape=(480,480,1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(20, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(100, (4, 4), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
hist_3 = model.fit(train_cases,train_class,verbose=False,epochs=30,validation_data=(test_cases,test_class),batch_size=10)
# evaluate the model
hist3acc=model.evaluate(test_cases,test_class)
#Accuracy 53%
## Graphs
plot.subplot(212)
#plot.title('Accuracy / Binary Crossentropy')
plot.plot(hist_3.history['accuracy'], label='Train')
plot.plot(hist_3.history['val_accuracy'], label='Test')
plot.legend()
plot.show()
plot.subplot(211)
#plot.title('Loss / Binary Crossentropy')
plot.plot(hist_3.history['loss'], label='Train')
plot.plot(hist_3.history['val_loss'], label='Test')
plot.legend()
plot.show()
## LET'S TRY REGULARIZATION
model = Sequential()
model.add(Conv2D(8, (5, 5), activation='relu', input_shape=(480,480,1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(2, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(10,
kernel_regularizer=regularizers.l2(0.01),
activity_regularizer=regularizers.l1(0.01)))
#model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.summary()
hist= model.fit(train_cases,train_class,verbose=False,epochs=50,validation_data=(test_cases,test_class),batch_size=10)
# evaluate the model
hist_acc=model.evaluate(test_cases,test_class)
#Accuracy 53%
# plot accuracy during training
plot.subplot(212)
plot.title('Accuracy / Binary Crossentropy')
plot.plot(hist.history['accuracy'], label='Train')
plot.plot(hist.history['val_accuracy'], label='Test')
plot.legend()
plot.show()
plot.subplot(211)
plot.title('Loss / Binary Crossentropy')
plot.plot(hist.history['loss'], label='Train')
plot.plot(hist.history['val_loss'], label='Test')
plot.legend()
plot.show()
## It didnt help much with accuracy but it did with the loss
| 31.533613 | 122 | 0.72445 | 3.1875 |
4483085aa1a5617f634634d102a9b9e536d610e8
| 931 |
py
|
Python
|
testproject/fiber_test/test_util.py
|
bsimons/django-fiber
|
0f4b03217a4aeba6b48908825507fbe8c5732c8d
|
[
"Apache-2.0"
] | null | null | null |
testproject/fiber_test/test_util.py
|
bsimons/django-fiber
|
0f4b03217a4aeba6b48908825507fbe8c5732c8d
|
[
"Apache-2.0"
] | null | null | null |
testproject/fiber_test/test_util.py
|
bsimons/django-fiber
|
0f4b03217a4aeba6b48908825507fbe8c5732c8d
|
[
"Apache-2.0"
] | null | null | null |
import re
from django.template import Template, Context
try:
from django.utils.timezone import make_aware, utc
except ImportError:
make_aware, utc = None, None
def format_list(l, must_sort=True, separator=' '):
"""
Format a list as a string. Default the items in the list are sorted.
E.g.
>>> format_list([3, 2, 1])
u'1 2 3'
"""
titles = [unicode(v) for v in l]
if must_sort:
titles = sorted(titles)
return separator.join(titles)
def condense_html_whitespace(s):
s = re.sub("\s\s*", " ", s)
s = re.sub(">\s*<", "><", s)
s = re.sub(" class=\"\s?(.*?)\s?\"", " class=\"\\1\"", s)
s = s.strip()
return s
class RenderMixin(object):
def assertRendered(self, template, expected, context=None):
t, c = Template(template), Context(context or {})
self.assertEqual(condense_html_whitespace(t.render(c)), condense_html_whitespace(expected))
| 25.162162 | 99 | 0.619764 | 3.03125 |
b13ccfad2951d647cb40f2c1270d80f96834712b
| 1,734 |
py
|
Python
|
tensorflowpractice/MNIST_data/transform.py
|
nifannn/MachineLearningNotes
|
de38b2072a52a22483168fb10ac2cb896826c7e5
|
[
"MIT"
] | 1 |
2021-11-11T14:52:11.000Z
|
2021-11-11T14:52:11.000Z
|
tensorflowpractice/MNIST_data/transform.py
|
nifannn/MachineLearningNotes
|
de38b2072a52a22483168fb10ac2cb896826c7e5
|
[
"MIT"
] | null | null | null |
tensorflowpractice/MNIST_data/transform.py
|
nifannn/MachineLearningNotes
|
de38b2072a52a22483168fb10ac2cb896826c7e5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from PIL import Image
import struct
import csv
import progressbar
def read_image(filename, saveaddr):
with open(filename, 'rb') as f:
buf = f.read()
index = 0
magic, number, rows, cols = struct.unpack_from('>IIII', buf, index)
index += struct.calcsize('>IIII')
readbar = progressbar.ProgressBar('reading images ', number, 0)
readbar.show()
for cnt in range(number):
img = Image.new('L', (cols, rows))
for x in range(rows):
for y in range(cols):
img.putpixel((y, x), int(struct.unpack_from('>B', buf, index)[0]))
index += struct.calcsize('>B')
img.save(saveaddr + '/' + str(cnt) + '.png')
readbar.increase()
readbar.present()
print('Successfully read all images from ' + filename)
def read_label(filename, savefile):
with open(filename, 'rb') as f:
buf = f.read()
index = 0
magic, number = struct.unpack_from('>II', buf, index)
index += struct.calcsize('>II')
with open(savefile, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter = ',')
csvwriter.writerow(['No', 'label'])
readbar = progressbar.ProgressBar('reading labels ', number, 0)
readbar.show()
for cnt in range(number):
label = int(struct.unpack_from('>B', buf, index)[0])
index += struct.calcsize('>B')
csvwriter.writerow(map(str, [cnt, label]))
readbar.increase()
readbar.present()
print('Successfully read all labels from ' + filename)
def main():
read_image('train-images-idx3-ubyte', 'train-images-idx3')
read_label('train-labels-idx1-ubyte', 'train-labels-idx1/train-labels.csv')
read_image('t10k-images-idx3-ubyte', 't10k-images-idx3')
read_label('t10k-labels-idx1-ubyte', 't10k-labels-idx1/test-labels.csv')
if __name__ == '__main__':
main()
| 27.52381 | 76 | 0.67474 | 3.5625 |
63cc39d178e1ccbf55d8a1c46ed24536025571c3
| 3,581 |
dart
|
Dart
|
lib/app/modules/search_results/search_results_controller.dart
|
7ANV1R/RoveAssist
|
5b7cc71171bb03faa30ebde463e0e4766bfda261
|
[
"MIT"
] | 2 |
2021-10-12T05:32:38.000Z
|
2021-11-04T22:01:39.000Z
|
lib/app/modules/search_results/search_results_controller.dart
|
7ANV1R/RoveAssist
|
5b7cc71171bb03faa30ebde463e0e4766bfda261
|
[
"MIT"
] | null | null | null |
lib/app/modules/search_results/search_results_controller.dart
|
7ANV1R/RoveAssist
|
5b7cc71171bb03faa30ebde463e0e4766bfda261
|
[
"MIT"
] | null | null | null |
import 'dart:convert';
import 'package:flutter/cupertino.dart';
import 'package:flutter_dotenv/flutter_dotenv.dart';
import 'package:get/get.dart';
import 'package:http/http.dart' as http;
import 'package:roveassist/app/data/models/service_model/place_model.dart';
import 'package:roveassist/app/data/models/service_model/restaurant_model.dart';
import '../../data/models/service_model/package_tour_model.dart';
class SearchResultsController extends GetxController {
final String localhost = dotenv.env['BASE_URL'] ?? 'not found';
@override
void onInit() {
super.onInit();
}
@override
void onReady() {
super.onReady();
}
@override
void onClose() {}
TextEditingController queryController = TextEditingController();
RxList<PackageTourModel> packageTourList = RxList<PackageTourModel>();
RxList<RestaurantModel> restaurantResultList = RxList<RestaurantModel>();
RxList<PlaceModel> placeList = RxList<PlaceModel>();
Future<void> fetchPackageTour(String query) async {
try {
String baseUrl = '$localhost/features/packagetour/';
Map<String, String> headers = {'Content-Type': 'application/json'};
http.Response response = await http.get(
Uri.parse(baseUrl),
headers: headers,
);
final List<PackageTourModel> fetchedPackageTour = List<PackageTourModel>.from(
(json.decode(response.body) as List<dynamic>)
.map(
(e) => PackageTourModel.fromJson(e as Map<String, dynamic>),
)
.where((result) {
final titleLower = result.title.toLowerCase();
final searchLower = query.toLowerCase();
return titleLower.contains(searchLower);
}),
).toList();
packageTourList.value = fetchedPackageTour.reversed.toList();
print(packageTourList);
} catch (e) {}
}
Future<void> fetchRestaurant(String query) async {
try {
String baseUrl = '$localhost/features/restaurant/';
Map<String, String> headers = {'Content-Type': 'application/json'};
http.Response response = await http.get(
Uri.parse(baseUrl),
headers: headers,
);
final List<RestaurantModel> fetchedRestaurant = List<RestaurantModel>.from(
(json.decode(response.body) as List<dynamic>)
.map(
(e) => RestaurantModel.fromJson(e as Map<String, dynamic>),
)
.where((result) {
final titleLower = result.title.toLowerCase();
final searchLower = query.toLowerCase();
return titleLower.contains(searchLower);
}),
).toList();
restaurantResultList.value = fetchedRestaurant.reversed.toList();
print(restaurantResultList);
} catch (e) {}
}
Future<void> fetchPlace(String query) async {
try {
String baseUrl = '$localhost/features/place/';
Map<String, String> headers = {'Content-Type': 'application/json'};
http.Response response = await http.get(
Uri.parse(baseUrl),
headers: headers,
);
final List<PlaceModel> fetchedPlace = List<PlaceModel>.from(
(json.decode(response.body) as List<dynamic>)
.map(
(e) => PlaceModel.fromJson(e as Map<String, dynamic>),
)
.where((result) {
final titleLower = result.title.toLowerCase();
final searchLower = query.toLowerCase();
return titleLower.contains(searchLower);
}),
).toList();
placeList.value = fetchedPlace.reversed.toList();
print(restaurantResultList);
} catch (e) {}
}
}
| 31.690265 | 84 | 0.646188 | 3.015625 |
b01dca4c7fd50c169eb78f6cd96703e886ac93bb
| 4,976 |
py
|
Python
|
src/data/make_dataset.py
|
maycownd/deep-learning-fashion-mnist
|
bbaf0c952d92f37f40052d76470c44fc3d71bad5
|
[
"MIT"
] | null | null | null |
src/data/make_dataset.py
|
maycownd/deep-learning-fashion-mnist
|
bbaf0c952d92f37f40052d76470c44fc3d71bad5
|
[
"MIT"
] | null | null | null |
src/data/make_dataset.py
|
maycownd/deep-learning-fashion-mnist
|
bbaf0c952d92f37f40052d76470c44fc3d71bad5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import click
import logging
from dotenv import find_dotenv, load_dotenv
import pandas as pd
import numpy as np
from PIL import Image
PATH_TRAIN = "data/raw/fashion-mnist_train.csv"
PATH_TEST = "data/raw/fashion-mnist_test.csv"
DATA_PATH = "data/raw/"
dict_fashion = {
0: 'T-shirt/top',
1: 'Trouser',
2: 'Pullover',
3: 'Dress',
4: 'Coat',
5: 'Sandal',
6: 'Shirt',
7: 'Sneaker',
8: 'Bag',
9: 'Ankle boot'
}
def csv2img(csv, path, is_train=True):
"""
Convert pixel values from .csv to .png image
Source: https://www.kaggle.com/alexanch/image-classification-w-fastai-fashion-mnist
"""
# define the name of the directory to be created
if is_train:
image_path = "working/train/"
else:
image_path = "working/test/"
full_path = os.path.join(path, image_path)
if os.path.isdir(full_path):
return None
try:
os.makedirs(full_path)
except OSError:
print("Creation of the directory %s failed" % full_path)
else:
print("Successfully created the directory %s" % full_path)
for i in range(len(csv)):
# csv.iloc[i, 1:].to_numpy() returns pixel values array
# for i'th imag excluding the label
# next step: reshape the array to original shape(28, 28)
# and add missing color channels
result = Image.fromarray(np.uint8(
np.stack(
np.rot90(
csv.iloc[i, 1:].to_numpy().
reshape((28, 28)))*3, axis=-1)))
# save the image:
result.save(f'{full_path}{str(i)}.png')
print(f'{len(csv)} images were created.')
def create_train_test(csv_train, csv_test, data_path=DATA_PATH):
"""Create images on `data_path` from data provided by csvs.
This is just a wrapper of csv2img to create the images provided
by many csvs at once.
Args:
csv_list ([type]): [description]
data_path (str, optional): [description]. Defaults to "../../Data/raw".
"""
csv2img(csv_train, data_path, True)
csv2img(csv_test, data_path, False)
def import_xy(
path_train=PATH_TRAIN,
path_test=PATH_TEST,
label_name="label"):
"""Import data from specified path.
Args:
path_train (str, optional): [description]. Defaults to PATH_TRAIN.
path_test ([type], optional): [description]. Defaults to PATH_TEST.
label_name (str, optional): [description]. Defaults to "label".
Returns:
[type]: [description]
"""
# importng the data from the paths which are there by default
df_train = pd.read_csv(path_train)
df_test = pd.read_csv(path_test)
# creating images from csv data
create_train_test(df_train, df_test)
# creating labels
df_train['label_text'] = df_train['label'].apply(lambda x: dict_fashion[x])
df_test['label_text'] = df_test['label'].apply(lambda x: dict_fashion[x])
# add image names:
df_train['img'] = pd.Series([str(i)+'.png' for i in range(len(df_train))])
df_test['img'] = pd.Series([str(i)+'.png' for i in range(len(df_test))])
X_train, y_train = df_train.drop("label", axis=1), df_train["label"]
X_test, y_test = df_test.drop("label", axis=1), df_test["label"]
# save corresponding labels and image names to .csv file:
df_train[['img', 'label_text']].to_csv(
os.path.join(DATA_PATH,
'working/train_image_labels.csv'), index=False)
df_test[['img', 'label_text']].to_csv(
os.path.join(DATA_PATH,
'working/test_image_labels.csv'), index=False)
return X_train, y_train, X_test, y_test
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True),
default="data/raw/")
@click.argument('output_filepath', type=click.Path(),
default="data/interim/")
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
if not input_filepath:
input_filepath="data/raw/fashion-mnist_train.csv"
if not output_filepath:
output_filepath="data/interim/mnist_train.csv"
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
X_train, y_train, X_test, y_test = import_xy()
print("imported data")
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main()
| 32.953642 | 87 | 0.635651 | 3.03125 |
da59f5df0119d2eae3302cb4497a35071feb2501
| 7,110 |
tsx
|
TypeScript
|
packages/tyreant/src/type/integer.tsx
|
tyranid-org/tyranid
|
985b855df04b2b6236b2dc1c3bab4438670f136c
|
[
"Apache-2.0"
] | 9 |
2015-07-22T20:24:14.000Z
|
2022-01-05T06:48:30.000Z
|
packages/tyreant/src/type/integer.tsx
|
tyranid-org/tyranid
|
985b855df04b2b6236b2dc1c3bab4438670f136c
|
[
"Apache-2.0"
] | 158 |
2015-06-21T05:18:05.000Z
|
2022-02-25T15:00:29.000Z
|
packages/tyreant/src/type/integer.tsx
|
tyranid-org/tyranid
|
985b855df04b2b6236b2dc1c3bab4438670f136c
|
[
"Apache-2.0"
] | null | null | null |
import * as React from 'react';
import { useEffect } from 'react';
import { Slider, InputNumber, Select } from 'antd';
import { Tyr } from 'tyranid/client';
import { mapPropsToForm, onTypeChange } from './type';
import { TyrFilter, FilterDdProps } from '../core/filter';
import { byName, TyrTypeProps } from './type';
import { decorateField } from '../core';
import { registerComponent } from '../common';
import { withThemedTypeContext } from '../core/theme';
type SliderValue = [number, number] | undefined;
const { Option } = Select;
const filterOptions = [
'Equals',
'Not equal',
'Less Than',
'Less Than or equals',
'Greater than',
'Greater than or equals',
];
export const TyrIntegerBase = <D extends Tyr.Document = Tyr.Document>(
props: TyrTypeProps<D>
) => {
useEffect(() => mapPropsToForm(props), [
props.path && props.path.name,
props.document,
]);
return decorateField('integer', props, () => {
return (
<InputNumber
{...(props.searchRange
? {
min: props.searchRange[0] as number,
max: props.searchRange[1] as number,
}
: {})}
onChange={ev => onTypeChange(props, ev, undefined)}
placeholder={props.placeholder}
tabIndex={props.tabIndex}
precision={0}
step="1"
{...(props.formatter !== undefined && { formater: props.formatter })}
{...(props.min !== undefined && { min: props.min })}
{...(props.max !== undefined && { max: props.max })}
/>
);
});
};
export const TyrInteger = withThemedTypeContext('integer', TyrIntegerBase);
byName.integer = {
component: TyrIntegerBase,
cellValue: (
path: Tyr.PathInstance,
document: Tyr.Document,
props: TyrTypeProps<any>
) => {
const v = path.get(document);
if (!v || typeof v === 'number') {
return props.formatter ? props.formatter(v as number) : v;
}
return '[Error]';
},
filter(component, props) {
const path = props.path!;
const { searchRange, searchNumber } = props;
const sliderFilter = (filterDdProps: FilterDdProps) => {
const defaultValue = (searchRange
? (searchRange as [number, number])
: [0, 100]) as [number, number];
const compProps = {
...(searchRange ? { min: searchRange[0] as number } : { min: 0 }),
...(searchRange ? { max: searchRange[1] as number } : { max: 100 }),
};
return (
<TyrFilter<SliderValue>
typeName="integer"
component={component}
filterDdProps={filterDdProps}
pathProps={props}
>
{(searchValue, setSearchValue) => (
<Slider
range
{...compProps}
value={searchValue || (defaultValue.slice() as [number, number])}
onChange={setSearchValue}
/>
)}
</TyrFilter>
);
};
const numberFilter = (filterDdProps: FilterDdProps) => (
<TyrFilter<[string, number?]>
typeName="integer"
component={component}
filterDdProps={filterDdProps}
pathProps={props}
>
{(searchValue, setSearchValue, search) => {
const setSearchValueChoice = (choice: string) => {
setSearchValue([choice, searchValue ? searchValue[1] : undefined]);
};
const setSearchValueNumber = (
value: string | number | null | undefined
) => {
if (!isNaN(value as any)) {
setSearchValue([
searchValue ? searchValue[0] : filterOptions[0],
value as number,
]);
}
};
return (
<React.Fragment>
<Select
defaultValue={searchValue ? searchValue[0] : filterOptions[0]}
onChange={setSearchValueChoice}
style={{ width: 188, marginBottom: 8, display: 'block' }}
>
{filterOptions.map(op => (
<Option key={op} value={op}>
{op}
</Option>
))}
</Select>
<InputNumber
autoFocus={true}
value={searchValue ? searchValue[1] : undefined}
onChange={setSearchValueNumber}
onPressEnter={() => search()}
style={{ width: 188, marginBottom: 8, display: 'block' }}
/>
</React.Fragment>
);
}}
</TyrFilter>
);
return {
filterDropdown: searchNumber ? numberFilter : sliderFilter,
onFilter: (
value: number[] | [string, number] | undefined,
doc: Tyr.Document
) => {
if (value === undefined) return true;
const intVal = (path.get(doc) as number) || 0;
if (searchNumber) {
switch (value[0]) {
case 'Equals':
return intVal === value[1];
case 'Not equal':
return intVal !== value[1];
case 'Less Than':
return intVal < value[1];
case 'Less Than or equals':
return intVal <= value[1];
case 'Greater than':
return intVal > value[1];
case 'Greater than or equals':
return intVal >= value[1];
default:
throw new Error(`How did you pick this: ${value[0]}?`);
}
}
return intVal >= value[0] && intVal <= value[1];
},
/*
onFilterDropdownVisibleChange: (visible: boolean) => {
if (visible) {
setTimeout(() => searchInputRef!.focus());
}
}
*/
};
},
finder(path, opts, searchValue, pathProps) {
if (searchValue) {
if (!opts.query) opts.query = {};
if (pathProps?.searchNumber) {
let searchParams: object | undefined = undefined;
switch (searchValue[0]) {
case 'equals':
searchParams = searchValue[1];
break;
case 'Not equal':
searchParams = { $ne: searchValue[1] };
break;
case 'Less Than':
searchParams = { $lt: searchValue[1] };
break;
case 'Less Than or equals':
searchParams = { $lte: searchValue[1] };
break;
case 'Greater than':
searchParams = { $gt: searchValue[1] };
break;
case 'Greater than or equals':
searchParams = { $gte: searchValue[1] };
break;
}
if (searchParams) {
if (!opts.query) opts.query = {};
opts.query[path.spathArr] = searchParams;
}
} else {
const searchParams = [
{ [path.spathArr]: { $gte: searchValue[0] } },
{ [path.spathArr]: { $lte: searchValue[1] } },
];
if (opts.query.$and) {
opts.query.$and = [...opts.query.$and, ...searchParams];
} else {
opts.query.$and = searchParams;
}
}
}
},
};
registerComponent('TyrInteger', TyrInteger);
| 29.020408 | 79 | 0.514065 | 3.046875 |
02773adfef90ebe52be74848f2315ef64a8c0346
| 1,822 |
cpp
|
C++
|
algorithms/recursion/twisted_tower_of_hanoi.cpp
|
adisakshya/dsa
|
5b40eb339b19cdec95dcfc645516d725f0cb6c74
|
[
"MIT"
] | null | null | null |
algorithms/recursion/twisted_tower_of_hanoi.cpp
|
adisakshya/dsa
|
5b40eb339b19cdec95dcfc645516d725f0cb6c74
|
[
"MIT"
] | null | null | null |
algorithms/recursion/twisted_tower_of_hanoi.cpp
|
adisakshya/dsa
|
5b40eb339b19cdec95dcfc645516d725f0cb6c74
|
[
"MIT"
] | null | null | null |
/**
* Twisted Tower of Hanoi (Recursive)
*
* Twisted Tower of Hanoi is a mathematical puzzle where we have three rods and n disks.
* The objective of the puzzle is to move the entire stack to another rod, obeying the following simple rules:
* 1) Only one disk can be moved at a time.
* 2) Each move consists of taking the upper disk from one of the stacks and placing it on top of another stack
* i.e. a disk can only be moved if it is the uppermost disk on a stack.
* 3) No disk may be placed on top of a smaller disk.
* 4) No disk can be moved directly from first rod to last rod
*
* Complexity
* Time:
* Worst Case: 3^N
* Space: O(N)
*
*/
#include <bits/stdc++.h>
using namespace std;
// Function to solve Tower of Hanoi
void twisted_tower_of_hanoi(int disks, char from_rod, char to_rod, char aux_rod) {
// Base condition
if(disks == 1) {
cout << "Disk 1 moved from " << from_rod << " to " << aux_rod << endl;
cout << "Disk 1 moved from " << aux_rod << " to " << to_rod << endl;
return;
}
// shift n-1 disks from from_rod to to_rod
twisted_tower_of_hanoi(disks - 1, from_rod, to_rod, aux_rod);
cout << "Disk " << disks << " moved from " << from_rod << " to " << aux_rod << endl;
// shift n-1 disks from to_rod to from_rod
twisted_tower_of_hanoi(disks - 1, to_rod, from_rod, aux_rod);
cout << "Disk " << disks << " moved from " << aux_rod << " to " << to_rod << endl;
// shift n-1 from from_rod to to_rod
twisted_tower_of_hanoi(disks - 1, from_rod, to_rod, aux_rod);
}
int main() {
int number_of_disks = 2;
// A, B and C are name of rods
twisted_tower_of_hanoi(number_of_disks, 'A', 'C', 'B');
return 0;
}
| 35.72549 | 117 | 0.608672 | 3.328125 |
a180868a69c3672e8970a85f7e1aa2d28fb526e9
| 1,529 |
ts
|
TypeScript
|
bench/bench.ts
|
mees-/gclick
|
c8f04f06f0ecde97ced4a7736ea7f2ee5ac4c0f6
|
[
"MIT"
] | null | null | null |
bench/bench.ts
|
mees-/gclick
|
c8f04f06f0ecde97ced4a7736ea7f2ee5ac4c0f6
|
[
"MIT"
] | 1 |
2020-07-16T04:55:17.000Z
|
2020-07-16T04:55:17.000Z
|
bench/bench.ts
|
mees-/gclick
|
c8f04f06f0ecde97ced4a7736ea7f2ee5ac4c0f6
|
[
"MIT"
] | null | null | null |
import { PerformanceObserver, performance } from 'perf_hooks'
export type benchmarkOptions<Context> = {
createContext?: () => Context
beforeBench?: (context: Context) => any
beforeRun?: (context: Context) => void
runs: number
loops: number
benchMarkFunction: (context: Context) => void
afterRun?: (context: Context) => void
afterBench?: (context: Context) => void
}
export default function benchmark<Context>(options: benchmarkOptions<Context>) {
const benchStartTime = performance.now()
const context: Context =
(options.createContext && options.createContext()) || ({} as Context)
const performanceEntries: number[] = []
const observer = new PerformanceObserver(list => {
performanceEntries.push(...list.getEntries().map(entry => entry.duration))
})
observer.observe({ entryTypes: ['measure'] })
options.beforeBench && options.beforeBench(context)
for (let i = 0; i < options.runs; i++) {
options.beforeRun && options.beforeRun(context)
performance.mark(`run-${i}-start`)
for (let j = 0; j < options.loops; j++) {
options.benchMarkFunction(context)
}
performance.mark(`run-${i}-end`)
performance.measure(`run-${i}`, `run-${i}-start`, `run-${i}-end`)
performance.clearMarks()
performance.clearMeasures()
options.afterRun && options.afterRun(context)
}
options.afterBench && options.afterBench(context)
observer.disconnect()
const totalDuration = performance.now() - benchStartTime
return { performanceEntries, totalDuration }
}
| 32.531915 | 80 | 0.691302 | 3.078125 |
46fa787a2799665abb31bbabd6a7370198a7a963
| 1,359 |
py
|
Python
|
src/models/farmer_rating.py
|
BuildForSDG/Team-151-Backend
|
623a8fa3c89e6974b346214c5c53dbed22bfbf10
|
[
"MIT"
] | null | null | null |
src/models/farmer_rating.py
|
BuildForSDG/Team-151-Backend
|
623a8fa3c89e6974b346214c5c53dbed22bfbf10
|
[
"MIT"
] | 1 |
2020-05-28T13:22:49.000Z
|
2020-05-28T13:22:49.000Z
|
src/models/farmer_rating.py
|
BuildForSDG/Team-151-Backend
|
623a8fa3c89e6974b346214c5c53dbed22bfbf10
|
[
"MIT"
] | 2 |
2020-06-03T18:22:34.000Z
|
2020-06-23T08:50:12.000Z
|
from sqlalchemy.orm import relationship, backref
from src.models.Model import db
from src.models.item_category import ItemCategoryModel
from src.models.user import UserModel
class FarmerRatingModel(db.Model):
__tablename__ = 'farmer_rating'
id = db.Column(db.Integer, primary_key=True)
farmerid = db.Column(db.Integer,db.ForeignKey('users.id'))
itemid = db.Column(db.Integer,db.ForeignKey('item_categories.id'))
ratedby = db.Column(db.Integer,unique=True) #userid
rating = db.Column(db.Integer)
user = relationship(UserModel, backref=backref("farmer_rating", cascade="all, delete-orphan"))
item_category = relationship(ItemCategoryModel, backref=backref("farmer_rating", cascade="all, delete-orphan"))
def __init__(self,farmerid,itemid,ratedby,rating):
self.farmerid = farmerid
self.itemid = itemid
self.ratedby = ratedby
self.rating = rating
def save_to_db(self):
db.session.add(self)
db.session.commit()
@classmethod
def find_by_farmerid(cls, farmerid,itemid,ratedby):
return cls.query.filter_by(farmerid=farmerid,itemid=itemid,ratedby=ratedby).first()
@classmethod
def find_by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
| 31.604651 | 115 | 0.709345 | 3.234375 |
8e762621e17fe7a08da5bd23155477549d50288e
| 2,544 |
rb
|
Ruby
|
config/configmapper.rb
|
IsmiKin/simplecompany
|
40a8f840725603030fe7f57698bff5bedf74ca8c
|
[
"MIT"
] | null | null | null |
config/configmapper.rb
|
IsmiKin/simplecompany
|
40a8f840725603030fe7f57698bff5bedf74ca8c
|
[
"MIT"
] | null | null | null |
config/configmapper.rb
|
IsmiKin/simplecompany
|
40a8f840725603030fe7f57698bff5bedf74ca8c
|
[
"MIT"
] | null | null | null |
#configmapper.rb
require_relative 'configvars.rb'
require 'data_mapper' # requires all the gems listed above
require 'dm-migrations'
require 'yaml'
#configdb = YAML.load(ERB.new((File.read('config/database.yml'))).result)["development"]
configdb = YAML.load((File.read('config/database.yml')))["development"]
DataMapper.setup(:default, "mysql://#{configdb['username']}:#{configdb['password']}@#{configdb['hostname']}/#{configdb['database']}")
#DataMapper.setup(:default, ENV['CLEARDB_DATABASE_URL'])
class Company
include DataMapper::Resource
property :idcompany, Serial , key: true
property :name, String , required: true , length: 1..250
property :address, Text , required: true , length: 1..250
property :country, String , required: true , length: 1..100
property :city, String , required: true , length: 1..100
property :email, Text , format: :email_address
property :phone, String
property :active, Boolean , default: true
has n, :companybks
end
class Companybk
include DataMapper::Resource
property :idcompanybk, Serial , key: true
property :name, String , required: true , length: 1..250
property :address, Text , required: true , length: 1..250
property :country, String , required: true , length: 1..100
property :city, String , required: true , length: 1..100
property :email, Text , format: :email_address
property :phone, String
property :created_at, DateTime
belongs_to :company
end
class Person
include DataMapper::Resource
property :idperson, Serial , key: true
property :name, String , length: 1..250
property :type, Text , required: true , length: 1..250
property :passport, Text , length: 500000
property :active, Boolean , default: true
has n, :personbks
belongs_to :company
end
class Personbk
include DataMapper::Resource
property :idpersonbk, Serial , key: true
property :name, String , length: 1..250
property :type, Text , required: true , length: 1..250
property :passport, Text , length: 500000
property :created_at, DateTime
belongs_to :person
belongs_to :company
end
DataMapper.finalize
# Remove comments for first launch
#DataMapper.auto_migrate!
#DataMapper.auto_upgrade!
| 31.407407 | 133 | 0.624607 | 3.03125 |
46cf5734222575ed50c588d45e87443dde5cc57a
| 9,140 |
py
|
Python
|
lab-4.py
|
Cooper-Yang/2016-OS-Course-Assignments
|
b84003cc9ffbcf1fcdee0f70ea0b5d874e5de74a
|
[
"MIT"
] | null | null | null |
lab-4.py
|
Cooper-Yang/2016-OS-Course-Assignments
|
b84003cc9ffbcf1fcdee0f70ea0b5d874e5de74a
|
[
"MIT"
] | null | null | null |
lab-4.py
|
Cooper-Yang/2016-OS-Course-Assignments
|
b84003cc9ffbcf1fcdee0f70ea0b5d874e5de74a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
OS Course Exp - 4: i-Node
Usage:
python lab-4.py [offset in file(Byte in Hex)] [content fold level = 0(default, hide block info) or 1(show index) or 2(show block content)]
# 混合索引逻辑地址到物理地址映射
* 条件:自定义混合索引 `inode` 结构
* 必须包括一次,二次,和三次间接块
* 逻辑块 `n` 对应物理块 `n`
* 输入:文件逻辑地址(这里指的是文件内的偏移位置)
* 输出
1. 输出 `inode` 详细信息(间接块不展开)
2. 物理地址(物理块号,块内偏移)
"""
from random import randint
from time import strftime
import sys
TOTAL_BLOCK = 2**32
class System(object):
    """
    Simulated OS block allocator: tracks which physical block numbers are
    currently in use.
    """
    def __init__(self, ):
        # Set of allocated block numbers.
        self.used = set()

    def alloc_block(self):
        """
        Allocate a random, currently unused block from the system resource
        and return its block number.
        """
        block_num = randint(0, TOTAL_BLOCK)
        # Bug fix: the original condition `block_num in self.used is True`
        # is a chained comparison -- `(block_num in self.used) and
        # (self.used is True)` -- which is always False, so already-used
        # blocks could be handed out again.  Test membership directly.
        while block_num in self.used:
            block_num = randint(0, TOTAL_BLOCK)
        self.used.add(block_num)
        return block_num

    def free_block(self, block_num=None):
        """
        Release a previously allocated block.

        do not free an unused block !!!
        :type block_num: int
        :raises ValueError: if the block is not currently allocated
        """
        # Preserve the original behavior of raising ValueError (via
        # list.index) when the block was never allocated.
        if block_num not in self.used:
            raise ValueError('block %r is not allocated' % (block_num,))
        self.used.remove(block_num)
        return

    def use_block(self, block_num=None):
        """
        Mark a specific block number as in use (no uniqueness check).

        :type block_num: int
        """
        self.used.add(block_num)
        return
SYSTEM = System()
class Block(object):
    """
    A single simulated disk block.

    Holds up to ``num_of_record`` records and remembers its own physical
    block number.
    """
    def __init__(self, num=None, num_of_record=None):
        """
        :type num: int
            physical block number of this block
        :type num_of_record: int
            capacity of this block, in records
        """
        self.num = num
        self.num_of_record = num_of_record
        self.data = []

    def data_append(self, input_data):
        """
        Store one record at the end of this block.

        :type input_data: int
        """
        self.data.append(input_data)

    def get_content(self, offset=None):
        """
        Return the record stored at ``offset`` inside this block.

        :type offset: int
        """
        snapshot = list(self.data)
        return snapshot[offset]
class BlockIndex(object):
    """
    Structure of direct and indirect block index.

    Level 0 is the direct block table (logical block n maps to physical
    block n); level N > 0 packs the block numbers of level N-1 into freshly
    allocated blocks, giving N-times indirection.

    NOTE(review): this file targets Python 2 (print statements at module
    bottom) -- the `/` divisions below are integer division and would
    silently become float division under Python 3.
    """
    def __init__(self, level=None, data_size=None, block_size=None, record_size=None, record_list=None):
        """
        :type level: int
        :type data_size: int
        :type block_size: int
        :type record_size: int
        :type record_list: list
        :raises ValueError: if level/record_list combination is inconsistent
            (level 0 must have no record_list; level > 0 must have one)
        """
        self.data_size = data_size
        # is 0 if is direct
        self.level_num = level
        self.index_list = list()
        self.block_list = list()
        if level == 0 and record_list is None:
            # Direct table: no records stored, only block numbers 0..n-1.
            self.num_of_record = None
            self.record_per_block = None
            self.block_size = block_size
            if data_size % block_size != 0:
                self.num_of_block = data_size / block_size + 1
            else:
                self.num_of_block = data_size / block_size
            # allocation direct block
            temp_a = 0
            while temp_a < self.num_of_block:
                self.index_list.append(temp_a)
                SYSTEM.use_block(temp_a)
                temp_a += 1
            self.index_list.sort()
        elif level > 0 and record_list is not None:
            # Indirect table: pack the previous level's block numbers
            # (record_list) into newly allocated blocks.
            self.block_size = block_size
            self.record_size = record_size
            self.record_per_block = block_size / record_size
            # number of record the input data have
            self.num_of_record = len(record_list)
            # number of block the input data need
            if data_size % block_size != 0:
                self.num_of_block = len(record_list) / self.record_per_block + 1
                self.is_full_fill = False
            else:
                self.num_of_block = len(record_list) / self.record_per_block
                self.is_full_fill = True
            i = 0
            while i < self.num_of_block:
                block_num = SYSTEM.alloc_block()
                self.index_list.append(block_num)
                i += 1
            self.index_list.sort()
            self.put_input_record_into_block(record_list)
        else:
            raise ValueError

    def put_input_record_into_block(self, input_record=None):
        """
        put the previous level BlockIndex's data into this level's block

        :type input_record: list
        """
        # Work out the capacity of the final (possibly partial) block so
        # every Block is created with the right num_of_record.
        if self.is_full_fill is False and self.num_of_record < self.record_per_block:
            special_num = self.num_of_record
            record_current_block_remain = self.num_of_record
        elif self.is_full_fill is False and self.num_of_record > self.record_per_block:
            special_num = self.num_of_record % self.record_per_block
            record_current_block_remain = self.record_per_block
        else:
            special_num = None
            record_current_block_remain = self.record_per_block
        counter = 0
        current = 0
        temp = Block(self.index_list[current], record_current_block_remain)
        while counter < self.num_of_record:
            if record_current_block_remain != 0:
                temp.data_append(input_record[counter])
                record_current_block_remain -= 1
            else:
                # Current block is full: store it and open the next one.
                self.block_list.append(temp)
                current += 1
                block_num = self.index_list[current]
                if current == self.num_of_block - 1 and self.is_full_fill is False:
                    temp = Block(block_num, special_num)
                else:
                    temp = Block(block_num, self.record_per_block)
                record_current_block_remain = temp.num_of_record
                temp.data_append(input_record[counter])
                record_current_block_remain -= 1
            counter += 1
        # append the last Block or due to the mechanism of while it will be dropped
        self.block_list.append(temp)

    def get_data(self, index=None):
        """
        Return the record at logical position ``index`` within this level
        (block = index / records-per-block, offset = index % records-per-block).

        :type index: int
        """
        block_num = index / self.record_per_block
        offset_num = index % self.record_per_block
        data = self.block_list[block_num].data[offset_num]
        return data

    def get_info(self, content_fold=0):
        """
        Return specified size of data according to specified index.

        :param content_fold: 0 hides block contents, 1 lists record
            indices, 2 lists the stored block numbers themselves
        :type content_fold: int
        """
        lines = list()
        if self.level_num == 0:
            lines.append('Direct Block Index Table:\n')
        else:
            lines.append('Level ' + str(self.level_num) + ' Block Index Table:\n')
        lines.append('\thave ' + str(self.num_of_record) + ' records\n')
        lines.append('\thave ' + str(self.num_of_block) + ' blocks\n')
        lines.append('\ttake ' + str(self.num_of_block * self.block_size) + ' Byte space\n')
        if self.level_num == 0:
            # Direct table is never expanded, regardless of content_fold.
            pass
        else:
            if content_fold == 0:
                pass
            else:
                k = 0
                while k < len(self.block_list):
                    lines.append('Block ' + str(k).rjust(4) + ' : address - ' + hex(self.block_list[k].num)+' :\n')
                    count = 0
                    hex_list = list()
                    while count < self.block_list[k].num_of_record:
                        if content_fold == 1:
                            hex_list.append(hex(count))
                        elif content_fold == 2:
                            hex_list.append(hex(self.block_list[k].data[count]))
                        count += 1
                    k += 1
                    lines.append(str(hex_list) + '\n')
        lines.append('\n')
        return lines
class IndexNode(object):
    """
    I-node Structure.

    Builds the direct table (level 0) plus ``num_of_level`` levels of
    indirect index blocks for a file of ``file_size`` bytes, and can
    translate a byte offset inside the file to its chain of block addresses.
    """
    def __init__(self, file_name=None, owner=None, file_size=None, num_of_level=None, block_size=None, record_size=None):
        """
        :type file_name: str
        :type owner: str
        :type file_size: int
        :type num_of_level: int
        :type block_size: int
        :type record_size: int
        :raises ValueError: if block_size is not a whole multiple of record_size
        """
        if block_size % record_size != 0:
            raise ValueError
        self.file_name = file_name
        self.owner = owner
        self.time_stamp = strftime('%Y/%m/%d %X')
        self.file_size = file_size
        self.num_of_level = num_of_level
        self.block_size = block_size
        self.record_size = record_size
        self.record_per_block = block_size / record_size
        self.block_index = list()
        self.generate_index()

    def generate_index(self):
        """
        generate all direct and indirect index
        """
        for i in range(0, self.num_of_level+1):
            if i == 0:
                temp = BlockIndex(i, self.file_size, self.block_size, self.record_size)
            else:
                # Each indirect level stores the block numbers of the level
                # below it, so its data size is (blocks below * record size).
                data_size = self.block_index[i-1].num_of_block * self.record_size
                temp = BlockIndex(i, data_size, self.block_size, self.record_size, self.block_index[i-1].index_list)
            self.block_index.append(temp)
        return

    def find_block(self, input_address=None):
        """
        Translate a byte offset inside the file into the chain of
        "index - block address" hops through the indirect levels.

        :type input_address: int
        """
        trace = list()
        in_file_block_num = input_address / self.block_size
        index_num = in_file_block_num
        i = 1
        while i <= self.num_of_level:
            content = str(index_num) + ' - ' + hex(self.block_index[i].get_data(index_num))
            trace.insert(0, content)
            index_num /= self.record_per_block
            i += 1
        # content = str(index_num) + ' - ' + hex(self.block_index[i].get_data(index_num))
        trace.insert(0, index_num)
        return 'Address Trace: ' + str(trace)

    def output(self, content_fold=0):
        """
        output itself

        :param content_fold: forwarded to BlockIndex.get_info
        :type content_fold: int
        """
        line = list()
        line.append('File Name: ' + str(self.file_name) + '\n')
        line.append('Owner: ' + str(self.owner) + '\n')
        line.append('Time: ' + str(self.time_stamp) + '\n')
        line.append('File Size: ' + str(self.file_size) + ' Byte\n')
        line.append('Index Level: ' + str(self.num_of_level) + '\n\n')
        for i in range(0, self.num_of_level+1):
            line.extend(self.block_index[i].get_info(content_fold))
        return line
if __name__ == "__main__":
    # NOTE: Python 2 script (print statements below).
    if len(sys.argv) > 1:
        # Fixed demo parameters: 1 MiB file, 256 B blocks, 4 B records,
        # three levels of indirection.
        FILE_NAME = 'I am awesome'
        OWNER = 'Cooper Yang'
        FILE_SIZE = 2**20
        LEVEL = 3
        BLOCK_SIZE = 2**8
        RECORD_SIZE = 2**2
        # First CLI argument: byte offset into the file, in hexadecimal.
        INPUT_ADDRESS = int(sys.argv[1], 16)
        if INPUT_ADDRESS > FILE_SIZE:
            print '\n'
            print '!!! this address is even bigger than the file !!!'
            print '\n'
            raise ValueError
    else:
        print '\n'
        print __doc__
        print '\n'
        raise NameError
    MY_FILE = IndexNode(FILE_NAME, OWNER, FILE_SIZE, LEVEL, BLOCK_SIZE, RECORD_SIZE)
    OUTPUT = open('lab-4.result', 'w')
    # Second CLI argument selects how much block detail goes into the report.
    if len(sys.argv) > 2 and sys.argv[2] == '1':
        OUTPUT.writelines(MY_FILE.output(content_fold=1))
    elif len(sys.argv) > 2 and sys.argv[2] == '2':
        OUTPUT.writelines(MY_FILE.output(content_fold=2))
    else:
        OUTPUT.writelines(MY_FILE.output(content_fold=0))
    OUTPUT.writelines(MY_FILE.find_block(INPUT_ADDRESS))
    OUTPUT.close()
    print 'completed !'
| 29.015873 | 139 | 0.690481 | 3.171875 |
1a8938c1509af3e92adc1be12836d3a25961447a
| 955 |
py
|
Python
|
process_incomplete_tiles.py
|
tommygod3/fyp-scripts
|
603e4f5025f297c2242f23b1ed56991606dff31d
|
[
"MIT"
] | null | null | null |
process_incomplete_tiles.py
|
tommygod3/fyp-scripts
|
603e4f5025f297c2242f23b1ed56991606dff31d
|
[
"MIT"
] | null | null | null |
process_incomplete_tiles.py
|
tommygod3/fyp-scripts
|
603e4f5025f297c2242f23b1ed56991606dff31d
|
[
"MIT"
] | null | null | null |
import json, subprocess, glob, sys, os
import pathlib, argparse
# load environment config and set path vars
# environment.json is expected to live beside this script.
file_path = os.path.realpath(__file__)
directory_path = "/".join(file_path.split("/")[:-1])
with open(f"{directory_path}/environment.json") as reader:
    environment = json.load(reader)
def process_all(top_directory):
    """Submit one LSF batch job per tile directory under ``top_directory``.

    Each job runs process_tile.py on the short-serial queue with a 5 hour
    wall-clock limit, writing the job log into the tile directory.

    :param top_directory: directory whose immediate children are tile dirs
    """
    import shlex  # local import to leave the module-level import block untouched
    for directory in glob.iglob(f"{top_directory}/*"):
        # Quote interpolated paths so directories containing spaces or
        # shell metacharacters cannot break (or inject into) the command.
        quoted_dir = shlex.quote(str(directory))
        subprocess.call(
            f"bsub -o {quoted_dir}/%J.out -W 5:00 -q short-serial "
            f"{shlex.quote(sys.executable)} "
            f"{shlex.quote(f'{directory_path}/process_tile.py')} -d {quoted_dir}/",
            shell=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=
        'This script scrolls through data index and processes tiles with low cloud cover')
    parser.add_argument('-d', '--dir', dest = 'directory',
                        help = 'dir to create tiles underneath')
    args = parser.parse_args()
    # Resolve to an absolute path so submitted jobs are cwd-independent.
    absolute_dir = pathlib.Path(args.directory).resolve()
    process_all(absolute_dir)
| 39.791667 | 156 | 0.699476 | 3.015625 |
2b9b83707c6ff7809c74b0ef86a9c0da5679b855
| 2,705 |
dart
|
Dart
|
enigma_dart/lib/src/widgets/text/enigma_form.dart
|
gedzeppelin/enigma_dart
|
32609114a65794b5028d39a593bfbdf999e10e40
|
[
"BSD-3-Clause"
] | 1 |
2021-12-24T17:11:27.000Z
|
2021-12-24T17:11:27.000Z
|
enigma_dart/lib/src/widgets/text/enigma_form.dart
|
gedzeppelin/enigma_dart
|
32609114a65794b5028d39a593bfbdf999e10e40
|
[
"BSD-3-Clause"
] | null | null | null |
enigma_dart/lib/src/widgets/text/enigma_form.dart
|
gedzeppelin/enigma_dart
|
32609114a65794b5028d39a593bfbdf999e10e40
|
[
"BSD-3-Clause"
] | null | null | null |
import "package:flutter/material.dart";
import "enigma_text_field.dart";
typedef Widget FormBuilder(BuildContext context, List<EgTextField> formItems);
/// A form widget that wires a list of [EgTextField]s into a single [Form],
/// optionally laying them out through a custom [formBuilder].
class EnigmaForm extends StatefulWidget {
  EnigmaForm({
    Key? key,
    required this.formItems,
    this.formBuilder,
    this.isExpanded = true,
    this.isScrollable = true,
    this.padding,
  }) : super(key: key);

  /// Optional custom layout for the generated fields; defaults to a [Column].
  final FormBuilder? formBuilder;

  /// Padding around the form body; defaults to `EdgeInsets.all(16.0)`.
  final EdgeInsets? padding;

  /// When true, the body is wrapped in an [Expanded].
  final bool isExpanded;

  /// When true, the body is wrapped in a [SingleChildScrollView].
  final bool isScrollable;

  /// The fields to render; each is copied with focus-traversal wiring.
  final List<EgTextField> formItems;

  @override
  EnigmaFormState createState() => EnigmaFormState();
}
class EnigmaFormState extends State<EnigmaForm> {
  // Focus nodes backing the fields; recreated on every build() call.
  // NOTE(review): build() allocates a fresh FocusNode set each rebuild
  // without disposing the previous one, so earlier nodes leak until this
  // State is disposed -- confirm, and consider creating the nodes in
  // initState/didUpdateWidget instead (see the commented-out initState).
  List<FocusNode>? focusNodeList = null;
  List<EgTextField>? formItems = null;

  // Drives validation from [isValid].
  final formKey = GlobalKey<FormState>();

  // Becomes true after the first isValid call; copied into each field so
  // they know whether validation errors should be shown.
  bool _initialized = false;

  @override
  void dispose() {
    // Only the most recently created node set is disposed here.
    focusNodeList?.forEach((fn) => fn.dispose());
    super.dispose();
  }

  /* @override
  void initState() {
    super.initState();

    final nodeSize = widget.formItems.length;

    // Generate a FocusNode for each form text field.
    focusNodeList = List<FocusNode>.generate(
      nodeSize,
      (_) => FocusNode(),
    );

    formItems = List<EgTextField>.generate(
      nodeSize,
      (int idx) => widget.formItems[idx].copyWith(
        initialized: _initialized,
        focusNode: focusNodeList[idx],
        nextNode: idx < nodeSize - 1 ? focusNodeList[idx + 1] : null,
      ),
    );
  } */

  /// Validates the form; also flips [_initialized] on first use so fields
  /// start displaying validation feedback.
  bool get isValid {
    if (!_initialized) {
      setState(() {
        _initialized = true;
      });
    }
    return formKey.currentState?.validate() ?? false;
  }

  @override
  Widget build(BuildContext context) {
    final nodeSize = widget.formItems.length;

    // Generate a FocusNode for each form text field.
    final focusNodeList = List<FocusNode>.generate(
      nodeSize,
      (_) => FocusNode(),
    );

    // Copy each field with its focus node and the next field's node so
    // "next" on the keyboard advances through the form.
    final formItems = List<EgTextField>.generate(
      nodeSize,
      (int idx) => widget.formItems[idx].copyWith(
        initialized: _initialized,
        focusNode: focusNodeList[idx],
        nextNode: idx < nodeSize - 1 ? focusNodeList[idx + 1] : null,
      ),
    );

    this.focusNodeList = focusNodeList;

    final form = Form(
      key: formKey,
      child: widget.formBuilder?.call(context, formItems) ??
          Column(children: formItems),
    );

    final body = widget.isScrollable
        ? SingleChildScrollView(
            padding: widget.padding ?? const EdgeInsets.all(16.0),
            child: form,
          )
        : Padding(
            padding: widget.padding ?? const EdgeInsets.all(16.0),
            child: form,
          );

    return widget.isExpanded ? Expanded(child: body) : body;
  }
}
| 24.590909 | 78 | 0.633272 | 3.1875 |
238cfe40b8045fad0e536a2fc00cfcc605b64673
| 4,410 |
js
|
JavaScript
|
static/js/wallboxsvc.js
|
hardcodes/wallboxsvc
|
078f41dd1a51ef5212f0a8df14dc56cd01fbe746
|
[
"MIT"
] | null | null | null |
static/js/wallboxsvc.js
|
hardcodes/wallboxsvc
|
078f41dd1a51ef5212f0a8df14dc56cd01fbe746
|
[
"MIT"
] | null | null | null |
static/js/wallboxsvc.js
|
hardcodes/wallboxsvc
|
078f41dd1a51ef5212f0a8df14dc56cd01fbe746
|
[
"MIT"
] | null | null | null |
// Show an error banner, lazily creating the singleton #ErrorMessage element
// on first use and reusing it on subsequent calls.
function showErrorMessage(messagetext) {
    var banner = document.getElementById("ErrorMessage");
    if (banner === null) {
        banner = document.createElement('div');
        banner.setAttribute('class', 'error-msg');
        banner.setAttribute('role', 'alert');
        banner.setAttribute('id', 'ErrorMessage');
        document.body.appendChild(banner);
    }
    banner.innerHTML = messagetext;
    banner.style.visibility = "visible";
}
// Hide the error banner (the #ErrorMessage element must already exist).
function hideErrorMessage() {
    var banner = document.getElementById("ErrorMessage");
    banner.style.visibility = "hidden";
}
/**
 * Show an error banner and, unless seconds is 0, hide it again after the
 * given delay.
 * @param {string} messagetext - HTML/text to display.
 * @param {number} [seconds=0] - auto-hide delay in seconds; 0 keeps it visible.
 */
function showErrorMessageWithTimer(messagetext, seconds = 0) {
    showErrorMessage(messagetext);
    if (seconds != 0) {
        // Timer id was stored in an unused local before; the banner is
        // never cancelled early, so the id is not needed.
        window.setTimeout(hideErrorMessage, (seconds * 1000));
    }
}
// Show a success banner, lazily creating the singleton #SuccessMessage
// element on first use and reusing it on subsequent calls.
function showSuccessMessage(messagetext) {
    var banner = document.getElementById("SuccessMessage");
    if (banner === null) {
        banner = document.createElement('div');
        banner.setAttribute('class', 'success-msg');
        banner.setAttribute('role', 'alert');
        banner.setAttribute('id', 'SuccessMessage');
        document.body.appendChild(banner);
    }
    banner.innerHTML = messagetext;
    banner.style.visibility = "visible";
}
// Hide the success banner (the #SuccessMessage element must already exist).
function hideSuccessMessage() {
    var banner = document.getElementById("SuccessMessage");
    banner.style.visibility = "hidden";
}
/**
 * Show a success banner and, unless seconds is 0, hide it again after the
 * given delay.
 * @param {string} messagetext - HTML/text to display.
 * @param {number} [seconds=0] - auto-hide delay in seconds; 0 keeps it visible.
 */
function showSuccessMessageWithTimer(messagetext, seconds = 0) {
    showSuccessMessage(messagetext);
    if (seconds != 0) {
        // Timer id was stored in an unused local before; it is not needed.
        window.setTimeout(hideSuccessMessage, (seconds * 1000));
    }
}
// Show the notification banner; unlike the error/success banners it also
// sets display:inline-block in addition to visibility.
function showNotifyMessage(messagetext) {
    var banner = document.getElementById("NotifyMessage");
    if (banner === null) {
        banner = document.createElement('div');
        banner.setAttribute('class', 'notify-msg');
        banner.setAttribute('id', 'NotifyMessage');
        document.body.appendChild(banner);
    }
    banner.innerHTML = messagetext;
    banner.style.display = "inline-block";
    banner.style.visibility = "visible";
}
// Make the progress spinner visible, lazily creating the singleton
// #ProgressSpinner element on first use.
function startProgressSpinner() {
    var spinner = document.getElementById("ProgressSpinner");
    if (spinner === null) {
        spinner = document.createElement('div');
        spinner.setAttribute('class', 'progress-spinner');
        spinner.setAttribute('id', 'ProgressSpinner');
        document.body.appendChild(spinner);
    }
    spinner.style.visibility = "visible";
}
// Hide the progress spinner (the #ProgressSpinner element must exist).
function stopProgressSpinner() {
    var spinner = document.getElementById("ProgressSpinner");
    spinner.style.visibility = "hidden";
}
/**
 * Perform an XMLHttpRequest against the wallbox web service.
 *
 * A 200 response whose body contains "ERROR:" is still treated as a
 * failure: the error banner is shown and errorcallback is invoked.
 *
 * @param {string} url - endpoint to call.
 * @param {function} okcallback - called with the response text on success.
 * @param {function} errorcallback - called when the body reports an error.
 * @param {string} [method="GET"] - HTTP verb; "POST" sends form-encoded data.
 * @param {*} [formdata=null] - body passed to XMLHttpRequest.send().
 * @param {number} [seconds=0] - how long error banners stay visible (0 = forever).
 * @param {number} [timeout=0] - XHR timeout in milliseconds (0 = none).
 */
function accessWebService(url, okcallback, errorcallback, method = "GET", formdata = null, seconds = 0, timeout = 0) {
    var xhttp = new XMLHttpRequest();
    xhttp.onreadystatechange = function () {
        if (this.readyState == 4 && this.status == 200) {
            // Typical action to be performed when the document is ready:
            stopProgressSpinner();
            if (this.responseText.includes("ERROR:")) {
                showErrorMessageWithTimer(this.responseText, seconds);
                if (typeof errorcallback == "function") {
                    errorcallback();
                }
            }
            else {
                if (typeof okcallback == "function") {
                    okcallback(this.responseText);
                }
            }
        }
    };
    xhttp.ontimeout = function () {
        showErrorMessageWithTimer("Timeout: can not load data!", seconds);
        stopProgressSpinner();
    };
    xhttp.onabort = function () {
        // Bug fix: this handler fires on abort but the original message
        // was prefixed "Timeout:".
        showErrorMessageWithTimer("Abort: loading the data was interrupted!", seconds);
        stopProgressSpinner();
    };
    xhttp.onerror = function () {
        // Bug fix: this handler fires on network errors but the original
        // message was prefixed "Timeout:".
        showErrorMessageWithTimer("Error: error while loading the data!", seconds);
        stopProgressSpinner();
    };
    startProgressSpinner();
    xhttp.open(method, url, true);
    xhttp.timeout = timeout;
    if (method === "POST") {
        // setRequestHeader takes exactly two arguments; the stray third
        // argument ("seconds") in the original call has been removed.
        xhttp.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
    }
    xhttp.send(formdata);
}
// Convenience wrapper around accessWebService: GET request with no body.
function queryWebService(url, okcallback, errorcallback, seconds = 0, timeout = 0) {
    accessWebService(url, okcallback, errorcallback, "GET", null, seconds, timeout)
}
// Convenience wrapper around accessWebService: form-encoded POST request.
function sendToWebService(url, okcallback, errorcallback, formdata, seconds = 0, timeout = 0) {
    accessWebService(url, okcallback, errorcallback, "POST", formdata, seconds, timeout)
}
| 36.147541 | 120 | 0.665533 | 3.0625 |
20dcd77cc5dd5808f5c3b4f44a264eb8b24a7ba0
| 1,525 |
py
|
Python
|
sli/sli_api.py
|
PaloAltoNetworks/SLI
|
0bf0aa449e45fdfd6553d69776d4fe0e8b52675e
|
[
"Apache-2.0"
] | null | null | null |
sli/sli_api.py
|
PaloAltoNetworks/SLI
|
0bf0aa449e45fdfd6553d69776d4fe0e8b52675e
|
[
"Apache-2.0"
] | 2 |
2021-09-10T17:42:18.000Z
|
2021-09-10T18:57:59.000Z
|
sli/sli_api.py
|
PaloAltoNetworks/SLI
|
0bf0aa449e45fdfd6553d69776d4fe0e8b52675e
|
[
"Apache-2.0"
] | null | null | null |
"""
A programmatic entry point for sli commands; populates sys.argv
as if the command were called from the CLI directly.
usage:
    from sli.sli_api import sli_command
    sli_command("sli configure -n my_skillet.skillet.yaml -d 192.168.1.1 -u username -p password")
"""
import sys
from sli.cli import cli
from sli.errors import SLIException, InvalidArgumentsException
def start_quote(text):
    """Return True if *text* begins with a single or double quote."""
    return text[:1] in ("'", '"')
def end_quote(text):
    """Return True if *text* finishes with a single or double quote."""
    return text[-1:] in ("'", '"')
def remove_quote(text):
    """Strip every single- and double-quote character from *text*."""
    for quote_char in ("'", '"'):
        text = text.replace(quote_char, "")
    return text
def sli_command(command):
    """
    Break a given command apart and load it into sys.argv as if it was
    processed normally from the cli, then invoke the click CLI.

    Single- or double-quoted arguments containing spaces are re-joined
    into a single argv entry with the quotes removed.

    :param command: full command line; must begin with "sli"
    :raises SLIException: if the command does not start with "sli"
    :raises InvalidArgumentsException: if a quoted argument is never closed
    """
    cmd = command.strip().split(" ")
    if not cmd[0] == "sli":
        raise SLIException("SLI command did not start with sli")
    cmd = cmd[1:]
    arg = ''
    in_arg = False
    for c in cmd:
        if start_quote(c) and end_quote(c) and len(c) > 1:
            # Bug fix: a fully quoted single token (e.g. 'value') used to
            # open a quoted span that was never closed, raising
            # InvalidArgumentsException; append it directly instead.
            sys.argv.append(remove_quote(c))
        elif start_quote(c):
            in_arg = True
            arg += remove_quote(c)
        elif in_arg and end_quote(c):
            in_arg = False
            arg += " " + remove_quote(c)
            sys.argv.append(arg)
            arg = ''
        elif in_arg:
            arg += " " + c
        else:
            sys.argv.append(c)
    if in_arg:
        raise InvalidArgumentsException("No closing quote found in SLI arguments")
    cli(standalone_mode=False)
| 26.754386 | 94 | 0.617705 | 3.4375 |
09305a4504d99949bc232884eb7d6d2b80a57568
| 1,805 |
lua
|
Lua
|
Lua/TickService.lua
|
yuyang158/xLua-Extend
|
545207583edc54cc3faed3ea5a11d9e6d0cedaf1
|
[
"MIT"
] | 10 |
2020-07-30T10:39:27.000Z
|
2021-06-28T07:48:57.000Z
|
Lua/TickService.lua
|
yuyang158/xLua-Extend
|
545207583edc54cc3faed3ea5a11d9e6d0cedaf1
|
[
"MIT"
] | 1 |
2020-08-20T13:37:04.000Z
|
2020-08-20T13:37:04.000Z
|
Lua/TickService.lua
|
yuyang158/xLua-Extend
|
545207583edc54cc3faed3ea5a11d9e6d0cedaf1
|
[
"MIT"
] | 3 |
2021-01-18T04:00:33.000Z
|
2021-04-23T01:56:47.000Z
|
local pairs, next, table, xpcall, setmetatable = pairs, next, table, xpcall, setmetatable
local util = require("util")
local uv = require "luv"
---@class TickService
-- Module table plus two registries: `tickers` maps an active callback to
-- its packed extra arguments, while `tickerToAdd` stages registrations so
-- they only take effect at the start of the next Tick().
local M = {}

local tickers = {}
local tickerToAdd = {}
--- Make `tickers` weak-keyed so a registered function that becomes
--- otherwise unreachable can be garbage-collected without an explicit
--- Unregister call.
function M.Init()
    setmetatable(tickers, {__mode = "k"})
end
--- Queue `func` (with any captured extra arguments) to start ticking from
--- the next Tick() call; the extras are forwarded ahead of deltaTime.
function M.Register(func, ...)
    tickerToAdd[func] = table.pack(...)
end
--- Drive the module: pump libuv once, merge pending registrations, then
--- invoke every registered ticker (errors are caught by xpcall_catch).
function M.Tick(deltaTime)
    -- Non-blocking libuv pump so M.Timeout timers fire on the game tick.
    uv.run("nowait")
    -- Merge registrations staged by M.Register; doing it here means
    -- `tickers` is never mutated while being iterated below.
    for func, pack in pairs(tickerToAdd) do
        tickers[func] = pack
    end
    tickerToAdd = {}
    for func, packed in pairs(tickers) do
        if packed.n ~= 0 then
            -- NOTE(review): table.unpack is not in tail position here, so
            -- only the first packed argument is forwarded when n > 1 --
            -- confirm whether multi-argument registration is intended.
            util.xpcall_catch(func, table.unpack(packed), deltaTime)
        else
            util.xpcall_catch(func, deltaTime)
        end
    end
end
---@param seconds number timeout in seconds, or a table {start=..., interval=...} for a first delay different from the repeat interval
---@param repeatTimes integer repeat count; -1 repeats forever
---@return function cancel function: call it to stop and remove the timer
function M.Timeout(seconds, repeatTimes, callback, ...)
    local timer = uv.new_timer()
    local start, interval
    if type(seconds) == "table" then
        -- libuv expects milliseconds.
        start = math.floor(seconds.start * 1000)
        interval = math.floor(seconds.interval * 1000)
    else
        start = math.floor(seconds * 1000)
        interval = math.floor(seconds * 1000)
    end
    local args = table.pack(...)
    timer:start(start, interval, function()
        -- A truthy second return value ("complete") from the callback, or
        -- an error inside it, stops the timer early.
        local ok, complete = util.xpcall_catch(callback, table.unpack(args))
        if not ok or complete then
            timer:close()
            return
        end

        if repeatTimes > 0 then
            repeatTimes = repeatTimes - 1
            if repeatTimes == 0 then
                timer:close()
            end
        end
    end)
    -- NOTE(review): invoking this after the timer has already closed
    -- itself closes the handle twice -- confirm luv tolerates that.
    return function()
        timer:close()
    end
end
--- Stop ticking `func`, whether it is currently active or still pending
--- activation from a recent Register call.
function M.Unregister(func)
    tickerToAdd[func], tickers[func] = nil, nil
end

return M
| 24.066667 | 89 | 0.612188 | 3.453125 |
cdc3660f0a40604304a91693fe7b4c423eacb6ee
| 1,161 |
sql
|
SQL
|
Src/Entity.Database/EntityCode/Stored Procedures/VentureOptionSave.sql
|
goodtocode/Entities
|
134a692973c80ccd417b36e303e9e28dc05c3bb9
|
[
"Apache-2.0"
] | 1 |
2020-08-13T00:49:48.000Z
|
2020-08-13T00:49:48.000Z
|
Src/Entity.Database/EntityCode/Stored Procedures/VentureOptionSave.sql
|
goodtocode/Entities
|
134a692973c80ccd417b36e303e9e28dc05c3bb9
|
[
"Apache-2.0"
] | null | null | null |
Src/Entity.Database/EntityCode/Stored Procedures/VentureOptionSave.sql
|
goodtocode/Entities
|
134a692973c80ccd417b36e303e9e28dc05c3bb9
|
[
"Apache-2.0"
] | 1 |
2021-03-07T03:04:34.000Z
|
2021-03-07T03:04:34.000Z
|
-- Upsert a VentureOption row: reconciles the surrogate Id with the Key,
-- inserts when the record is new, otherwise updates it, and returns the
-- resulting identifiers.
Create PROCEDURE [EntityCode].[VentureOptionSave]
	@Id Int,
	@Key Uniqueidentifier,
	@VentureKey Uniqueidentifier,
	@OptionKey Uniqueidentifier
AS
	-- Id and Key are both valid. Sync now.
	If (@Id <> -1) Select Top 1 @Key = IsNull([VentureOptionKey], @Key) From [Entity].[VentureOption] Where [VentureOptionId] = @Id
	If (@Id = -1 AND @Key <> '00000000-0000-0000-0000-000000000000') Select Top 1 @Id = IsNull([VentureOptionId], -1) From [Entity].[VentureOption] Where [VentureOptionKey] = @Key

	-- Insert vs. Update
	If (@Id Is Null) Or (@Id = -1)
	Begin
		-- Insert
		-- Generate a new key unless the caller supplied a non-empty one.
		Select @Key = IsNull(NullIf(@Key, '00000000-0000-0000-0000-000000000000'), NewId())
		Insert Into [Entity].[VentureOption] (VentureOptionKey, VentureKey, OptionKey)
			Values (@Key, @VentureKey, @OptionKey)
		Select @Id = SCOPE_IDENTITY()
	End
	Else
	Begin
		-- VentureOption master
		-- NOTE(review): VentureKey is not updated on this path -- confirm
		-- that re-parenting an existing VentureOption is intentional.
		Update [Entity].[VentureOption]
		Set	OptionKey = @OptionKey,
			ModifiedDate = GetUTCDate()
		Where VentureOptionId = @Id
	End

	-- Return data
	Select IsNull(@Id, -1) As Id, IsNull(@Key, '00000000-0000-0000-0000-000000000000') As [Key]
| 38.7 | 176 | 0.664944 | 3.296875 |
4f219fc69e9791d8097b1f72c3b7145dc3001596
| 1,310 |
ps1
|
PowerShell
|
chocolatey-visualstudio.extension/extensions/Get-VisualStudioInstance.ps1
|
kc/ChocolateyPackages
|
a81267a008d6be8d37aaad09e4e470f260e98c77
|
[
"MIT"
] | null | null | null |
chocolatey-visualstudio.extension/extensions/Get-VisualStudioInstance.ps1
|
kc/ChocolateyPackages
|
a81267a008d6be8d37aaad09e4e470f260e98c77
|
[
"MIT"
] | null | null | null |
chocolatey-visualstudio.extension/extensions/Get-VisualStudioInstance.ps1
|
kc/ChocolateyPackages
|
a81267a008d6be8d37aaad09e4e470f260e98c77
|
[
"MIT"
] | null | null | null |
function Get-VisualStudioInstance
{
<#
.SYNOPSIS
Returns information about installed Visual Studio instances.

.DESCRIPTION
Enumerates every Visual Studio instance installed on the machine and emits
one object per instance with its basic properties.

.OUTPUTS
A System.Management.Automation.PSObject with the following properties:
InstallationPath (System.String)
InstallationVersion (System.Version)
ProductId (System.String; Visual Studio 2017 only)
ChannelId (System.String; Visual Studio 2017 only)
#>
    [CmdletBinding()]
    Param
    (
    )

    # Visual Studio 2017+ ("Willow") instances expose product/channel ids.
    Get-WillowInstalledProducts | Where-Object { $null -ne $_ } | ForEach-Object {
        New-Object -TypeName PSObject -Property @{
            InstallationPath = $_.installationPath
            InstallationVersion = [version]$_.installationVersion
            ProductId = $_.productId
            ChannelId = $_.channelId
        }
    }

    # Pre-2017 instances carry no product/channel identifiers.
    Get-VSLegacyInstance | Where-Object { $null -ne $_ } | ForEach-Object {
        New-Object -TypeName PSObject -Property @{
            InstallationPath = $_.Path
            InstallationVersion = $_.Version
            ProductId = $null
            ChannelId = $null
        }
    }
}
| 29.111111 | 89 | 0.660305 | 3.09375 |
e277c83f669dcff60d45f42e154fb1183a26d8d6
| 22,437 |
py
|
Python
|
glearn/nerualnetworks/nn.py
|
ggutierrez545/neuralnetworks
|
891fc622515af765b8529091b99e377215e195ca
|
[
"MIT"
] | null | null | null |
glearn/nerualnetworks/nn.py
|
ggutierrez545/neuralnetworks
|
891fc622515af765b8529091b99e377215e195ca
|
[
"MIT"
] | 7 |
2020-07-17T23:58:40.000Z
|
2020-08-17T21:28:21.000Z
|
glearn/nerualnetworks/nn.py
|
ggutierrez545/neuralnetworks
|
891fc622515af765b8529091b99e377215e195ca
|
[
"MIT"
] | null | null | null |
import numpy as np
from ..utils.activation import activation, loss
class NeuralNetwork(object):
    """Base class representation of a neural network.

    Contains the logic and framework to build any fully connected,
    feed-forward neural network.

    Parameters
    ----------
    seed : int
        Seed for pseudo random number generation.
    l_rate : float
        Learning rate for gradient descent back propagation.
    m_factor : float
        Momentum factor for gradient descent back propagation.
    loss_func : str
        Keyword selecting the loss function ('mean-squared' or 'cross-entropy').

    Attributes
    ----------
    seed
    l_rate
    m_factor
    loss_func
    input_layer : :obj:`None` or :obj:`InputLayer`
        None when `NeuralNetwork` class is initialized.
        `InputLayer` instance once user calls `add_input_layer` method.
    layers : :obj:`list` of :obj:`InputLayer` and :obj:`ConnectedLayer`
        List containing the `InputLayer` and `ConnectedLayer`
        instances that form the architecture of the `NeuralNetwork`.
        First element in list is an `InputLayer` instance and
        all subsequent elements are `ConnectedLayer` instances.
    segments : list
        List containing `ConnectedSegment` instances which contain much of the
        primary feed forward / back propagation logic.

    Methods
    -------
    input_layer()
        Get or set the current `InputLayer` instance.
    add_input_layer(size)
        Add an `InputLayer` instance to the `NeuralNetwork` and append it to the `layers` attribute.
    add_connected_layer(size, activation_function='relu')
        Add a `ConnectedLayer` instance to the `NeuralNetwork` and append it to the `layers` attribute.
    feedforward(x)
        Feed the `NeuralNetwork` an example to make a prediction on.
    backpropagate(truth, updater='sgd', batch_size=50, momentum=True)
        Back propagate the resulting error from a `feedforward` pass.

    """
    def __init__(self, seed=10, l_rate=0.01, m_factor=0.9, loss_func='mean-squared'):
        self.seed = seed
        self.l_rate = l_rate
        self.m_factor = m_factor
        self.input_layer = None
        self.layers = []
        self.segments = []
        self.loss_func = loss_func
        # Seed numpy globally so weight initialization is reproducible.
        np.random.seed(seed)

    @property
    def input_layer(self):
        """:obj:`InputLayer` : `InputLayer` instance serving as the first layer in the `NeuralNetwork`.

        Setter method ensures input_layer must be `InputLayer` instance.

        Raises
        ------
        AssertionError
            If input_layer attempted is not `InputLayer` instance.

        """
        return self.__input_layer

    @input_layer.setter
    def input_layer(self, input_layer):
        if input_layer is None:
            self.__input_layer = None
        else:
            assert type(input_layer) is InputLayer, f"Cannot set input_layer with {type(input_layer)}; must be InputLayer instance"
            self.__input_layer = input_layer

    def add_input_layer(self, size):
        """Method to add an input layer of inputted size.

        Parameters
        ----------
        size : int or tuple
            Number of neurons in input layer or shape of input image.

        Raises
        ------
        AssertionError
            If `NeuralNetwork` instance already contains an `InputLayer` in the `layers` attribute.

        Notes
        -----
        Method does not return anything. Instead, it sets the `input_layer` attribute to an `InputLayer`
        instance and appends it to the beginning of the `layers` attribute.

        """
        # Before adding an InputLayer, verify one has not already been initialized
        if [type(i) for i in self.layers].__contains__(InputLayer):
            raise AssertionError("NeuralNetwork instance already contains InputLayer")
        else:
            # Accessibility of InputLayer makes feedforward method much easier. Same instance of InputLayer class is
            # referenced in both self.input_layer and self.layers
            self.input_layer = InputLayer(size, parent=self.__class__)
            self.layers.append(self.input_layer)

    def add_connected_layer(self, size, activation_function='relu'):
        """Method to add `ConnectedLayer` of inputted size.

        Parameters
        ----------
        size : int
            Number of neurons in connected layer.
        activation_function : str
            Keyword indicating the activation function to use for the layer.

        Raises
        ------
        AssertionError
            If `NeuralNetwork` does not already contain an `InputLayer` instance.

        """
        # Before adding ConnectedLayer, verify an InputLayer has already been initialized
        if [type(i) for i in self.layers].__contains__(InputLayer):
            self.layers.append(ConnectedLayer(size, activation=activation_function))
            # After each ConnectedLayer is added, create a ConnectedSegment from the last two elements in self.layers.
            # Using elements from self.layers to create the ConnectedSegment instance allows the chain of InputLayer and
            # ConnectedLayer references to be maintained. This is crucial for this architecture
            self.segments.append(ConnectedSegment(*self.layers[-2:]))
        else:
            raise AssertionError("NeuralNetwork instance must contain an InputLayer before adding a ConnectedLayer")

    def feedforward(self, x):
        """Method to feed forward an example through the `NeuralNetwork` and make a prediction.

        Parameters
        ----------
        x : `numpy.ndarray`
            Numpy array containing training example input data.

        Notes
        -----
        `x` overwrites the `act_vals` attribute in the `NeuralNetwork` instance's `InputLayer` allowing the information
        to be transfered to the `ConnectedSegment` instance containing the `InputLayer` as well, thereby making the
        feed forward process very simple.

        """
        # Update the InputLayer instance with a new set of values. This update will now be available in the first
        # ConnectedSegment instance in the self.segments list.
        self.input_layer.act_vals = x
        # And just simply iterate through each ConnectedSegment instance, calling the forward_pass method
        # on each which will update the relevant ConnectedLayer for use in the next ConnectedSegment
        for segment in self.segments:
            segment.forward_pass()

    def backpropagate(self, truth, updater='sgd', batch_size=50, momentum=True):
        """Method to back propagate the error from a training example.

        Assumes `feedforward` has already been called for the example.

        Parameters
        ----------
        truth : `np.ndarray`
            Array depicting the training example's actual value.
        updater : :obj:str, default 'sgd'
            String keyword for weights and biases updater method. Support keywords are 'sgd' and 'mini-batch'.
        batch_size : :obj:int, default 50
            Size of the mini-batch to update.
        momentum : :obj:bool, default `True`
            Toggle to include momentum calculation in updater method.

        """
        # Loss gradient at the output layer (loss/activation come from
        # ..utils.activation -- semantics assumed, not visible here).
        cost = loss(self.layers[-1].act_vals, truth, loss_type=self.loss_func)
        delta = None
        # Walk the segments from output back to input, propagating delta.
        for segment in reversed(self.segments):
            if delta is None:
                activated = activation(segment.back.raw_vals, func=segment.back.a_func, derivative=True)
                if self.loss_func == 'cross-entropy':
                    delta = (cost.T @ activated).reshape(-1, 1)
                else:
                    delta = cost * activated
            segment.back_propagate(delta)
            delta = segment.setup_next_delta(delta)
            segment.update_weights(self.l_rate, self.m_factor, updater=updater, batch_size=batch_size, momentum=momentum)
class InputLayer(object):
    """Simple class depicting the first layer in a neural network.

    Serves as base class for :obj:`ConnectedLayer`.

    Parameters
    ----------
    size : int or tuple
        Number of neurons in the layer, or the shape of an input image.
    parent : type
        Class of the owning container; when it is `NeuralNetwork`, new
        activation values are reshaped into a column vector.

    Attributes
    ----------
    size : int
        Number of neurons or rows of neurons in input layer.
    shape : tuple
        Shape of the layer as an array.
    act_vals : :obj:`None` or :obj:`numpy.ndarray`
        Array of activation values.

    """
    def __init__(self, size, parent):
        self._size_shape(size)
        self.act_vals = None
        self._parent = parent

    def _size_shape(self, size):
        """Determine if input is a vector or an array, i.e. if an image or not.

        Sets `shape` (tuple form of `size`) and `size` (total neuron count).

        Parameters
        ----------
        size : int or tuple

        """
        if type(size) is tuple:
            self.shape = size
        else:
            # Scalar sizes become a column vector shape.
            self.shape = (size, 1)
        self.size = int(np.prod(self.shape))

    @property
    def act_vals(self):
        """Array container for activation values.

        Setter method has a number of checks to ensure the new `act_vals` is the same size and shape as the
        previous `act_vals`. This is to maintain dimensional continuity within the `NeuralNetwork` instance.

        Raises
        ------
        AssertionError
            If number of neurons in new `act_vals` does not match previous neuron count.
        ValueError
            If new `act_vals` array shape does not match previous `act_vals` array shape.

        """
        return self.__act_vals

    @act_vals.setter
    def act_vals(self, act_vals):
        try:
            assert np.prod(act_vals.shape) == self.size, f"New layer size, {len(act_vals)}, != initial layer size {self.size}"
            if self._parent is NeuralNetwork:
                # Fully connected networks work on column vectors.
                self.__act_vals = act_vals.reshape(-1, 1)
            else:
                self.__act_vals = act_vals
        except AttributeError:
            # act_vals was None (or not an ndarray): clear the layer.
            self.__act_vals = None
class ConnectedLayer(InputLayer):
    """Child class of `InputLayer` depicting connected layers in a neural network.

    Parameters
    ----------
    size : int
        Number of neurons in the connected layer.
    activation : str
        Keyword indicating the type of activation function to use.

    Attributes
    ----------
    raw_vals : :obj:`None` or :obj:`numpy.array`
        Layer's raw values, i.e. before passing through activation function.
    biases : `numpy.array`
        Bias value associated with each neuron in the layer.

    See Also
    --------
    `InputLayer`
    """

    def __init__(self, size, activation='', parent=''):
        # NOTE: the `activation` parameter shadows the module-level
        # activation() helper within this method; here it is a keyword string,
        # not a callable.
        super().__init__(size, parent=parent)
        self.raw_vals = None
        # Small positive bias init — presumably to keep neurons from starting
        # dead with ReLU-style activations; TODO confirm intent.
        self.biases = np.zeros((size, 1)) + 0.01
        self.a_func = activation

    @property
    def raw_vals(self):
        """Array container for layer values pre-activation function.

        The setter verifies the new `raw_vals` array shape matches the layer
        shape, then immediately computes `act_vals` through the module-level
        activation() helper. If the attempted value has no `shape` attribute,
        `raw_vals` is set to `None`.

        Raises
        ------
        AssertionError
            If new `raw_vals` array shape is not equal to the layer shape.
        """
        return self.__raw_vals

    @raw_vals.setter
    def raw_vals(self, raw_vals):
        try:
            assert raw_vals.shape == self.shape, f"Raw layer shape, {raw_vals.shape}, != initial shape {self.shape}"
            self.__raw_vals = raw_vals
            # Side effect: assigning raw values refreshes the activations.
            self.act_vals = activation(self.raw_vals, func=self.a_func)
        except AttributeError:
            self.__raw_vals = None

    @property
    def biases(self):
        """Array container for neurons' bias terms.

        Setter verifies the attempted `biases` array shape matches the layer
        shape.

        Raises
        ------
        AssertionError
            If attempted `biases` shape does not match the layer shape.
        """
        return self.__biases

    @biases.setter
    def biases(self, biases):
        assert biases.shape == self.shape, f"New biases shape, {biases.shape}, != shape of layer {self.shape}"
        self.__biases = biases
class ConnectedSegment(object):
    """Container class for two layers in a `NeuralNetwork` instance.

    `ConnectedSegment` instances contain the weights between two layers in the
    neural network, as well as much of the logic for the feed forward and back
    propagation methods of the `NeuralNetwork` class. Consecutive
    `ConnectedSegment` instances have overlapping `front` and `back` layers
    (i.e. they are the same `ConnectedLayer` instance). This architecture
    allows easy access to either preceding or following layers when making
    calculations and lets the `ConnectedSegment` class contain simplified logic
    for feed forward and back propagation applications.

    Parameters
    ----------
    input_layer : :obj:`InputLayer` or :obj:`ConnectedLayer`
        The first layer in the `ConnectedSegment` instance.
    output_layer : `ConnectedLayer`
        The last layer in the `ConnectedSegment` instance.

    Attributes
    ----------
    front : :obj:`InputLayer` or :obj:`ConnectedLayer`
        The first layer in the `ConnectedSegment` instance.
    back : `ConnectedLayer`
        The last layer in the `ConnectedSegment` instance.
    weights : `ndarray`
        Weights of connections between front and back layers.
    shape : tuple
        Shape of the weights array.
    w_updates : :obj:`None` or :obj:`ndarray`
        Array containing each weights' update calculated from back propagation.
    prev_w_updates : :obj:`int` or :obj:`ndarray`
        Array containing previous weights' update for use with momentum.
    w_batch : :obj:`None` or :obj:`ndarray`
        Array containing sum of weight updates for mini-batch sgd.
    b_updates : :obj:`None` or :obj:`ndarray`
        Array containing each biases' update calculated from back propagation.
    prev_b_updates : :obj:`int` or :obj:`ndarray`
        Array containing previous biases' update for use with momentum.
    b_batch : :obj:`None` or :obj:`ndarray`
        Array containing sum of bias updates for mini-batch sgd.
    forward_passes : int
        Number of times a training example has been fed forward.

    Methods
    -------
    forward_pass()
        Calculate a forward pass from the front layer to the back layer.
    back_propagate(delta)
        Calculate the weight updates via back propagation.
    setup_next_delta(delta)
        Setup delta value for use in the next `ConnectedSegment` instance.
    update_weights(l_rate, m_factor, updater='', batch_size=50, momentum=True)
        Update the weights based on back propagation pass.
    """

    def __init__(self, input_layer, output_layer):
        self.front = input_layer
        self.back = output_layer
        self.shape = None
        self.weights = None
        self._create_weights()
        self.w_updates = None
        self.prev_w_updates = 0
        self.w_batch = None
        self.b_updates = None
        self.prev_b_updates = 0
        self.b_batch = None
        self.forward_passes = 0
        self.weight_hist = []

    def _create_weights(self):
        # Random init scaled by sqrt(1/fan_in). The else branch handles front
        # layers that expose `output_size` (e.g. a convolution-style layer
        # defined elsewhere in this file) — TODO confirm which layer type.
        if type(self.front) in [InputLayer, ConnectedLayer]:
            self.weights = np.random.randn(self.back.size, self.front.size) * np.sqrt(1 / self.front.size)
        else:
            self.weights = np.random.randn(self.back.size, self.front.output_size) * np.sqrt(1 / self.front.output_size)
        self.shape = self.weights.shape

    @property
    def weights(self):
        """Array container for the weights connecting the front layer to the back layer.

        The setter allows free assignment until `shape` is fixed by
        `_create_weights`; afterwards any array must match that shape
        (non-arrays, e.g. None, are stored as-is via the AttributeError path).

        Raises
        ------
        ValueError
            If attempting to set a weights array that does not match the
            original shape of the weights array.
        """
        return self.__weights

    @weights.setter
    def weights(self, weights):
        if self.shape is not None:
            try:
                if weights.shape != self.shape:
                    raise ValueError(f"Updated weights shape, {weights.shape}, != initial weights shape {self.shape}")
                else:
                    self.__weights = weights
            except AttributeError:
                self.__weights = weights
        else:
            self.__weights = weights

    @property
    def w_updates(self):
        """Array container for updates to each weight calculated via back propagation.

        Setter enforces dimensional consistency with the weights array; a
        non-array assignment resets the container to None.

        Raises
        ------
        AssertionError
            If attempting to set `w_updates` with a dimensionally
            inconsistent array.
        """
        return self.__w_updates

    @w_updates.setter
    def w_updates(self, w_updates):
        try:
            assert w_updates.shape == self.shape, f"Weight updates shape, {w_updates.shape}, != initial weights shape {self.shape}"
            self.__w_updates = w_updates
        except AttributeError:
            self.__w_updates = None

    @property
    def b_updates(self):
        """Array container for the back layer's bias updates.

        Setter enforces dimensional consistency with the back layer's bias
        array; a non-array assignment resets the container to None.

        Raises
        ------
        AssertionError
            If attempting to set `b_updates` with a dimensionally
            inconsistent array.
        """
        return self.__b_updates

    @b_updates.setter
    def b_updates(self, b_updates):
        try:
            assert b_updates.shape == self.back.shape, f"Bias updates shape, {b_updates.shape}, != initial shape {self.back.shape}"
            self.__b_updates = b_updates
        except AttributeError:
            self.__b_updates = None

    def forward_pass(self):
        """Fundamental logic to calculate a forward pass between two layers in a `NeuralNetwork` instance.
        """
        # Assigning back.raw_vals also refreshes back.act_vals via the
        # ConnectedLayer.raw_vals setter.
        if type(self.front) in [InputLayer, ConnectedLayer]:
            self.back.raw_vals = self.weights @ self.front.act_vals + self.back.biases
        else:
            self.back.raw_vals = self.weights @ self.front.raveled_output + self.back.biases
        self.forward_passes += 1

    def back_propagate(self, delta):
        """Fundamental logic to calculate weight and bias updates from back propagation in a `NeuralNetwork` instance.

        Parameters
        ----------
        delta : :obj:`ndarray`
            Array containing necessary computations from earlier portions of back propagation.
        """
        if type(self.front) in [InputLayer, ConnectedLayer]:
            self.w_updates = delta @ self.front.act_vals.T
        else:
            self.w_updates = delta @ self.front.raveled_output.T
        self.b_updates = delta

    def setup_next_delta(self, delta):
        """Logic to calculate new deltas for each layer in back propagation calculation.

        Parameters
        ----------
        delta : :obj:`ndarray`
            Array containing necessary computations from earlier portions of back propagation.

        Returns
        -------
        NoneType
            When back propagation has reached an `InputLayer` instance.
        :obj:`ndarray`
            Delta array to use for next layer in back propagation computation.
        """
        # If self.front is an InputLayer, i.e. we've backpropagated to the
        # initial layer, backpropagation is complete.
        if type(self.front) is InputLayer:
            return None
        elif type(self.front) is ConnectedLayer:
            return (self.weights.T @ delta) * activation(self.front.raw_vals, func=self.front.a_func, derivative=True)
        else:
            # Non-dense front layer: fall back to reshaping only when the
            # layer exposes no raw_output (AttributeError path).
            try:
                activated = activation(self.front.raw_output, func=self.front.a_func, derivative=True)
                return (self.weights.T @ delta).reshape(*self.front.shape) * activated
            except AttributeError:
                return (self.weights.T @ delta).reshape(*self.front.shape)

    def update_weights(self, l_rate, m_factor, updater='', batch_size=50, momentum=True):
        """Function to update `ConnectedSegment` instance's weights and biases based on user input.

        Parameters
        ----------
        l_rate : float
            The `NeuralNetwork`s learning rate.
        m_factor : float
            The `NeuralNetwork`s momentum factor.
        updater : str
            Keyword conveying type of updater to use ('sgd' or 'mini_batch').
        batch_size : int
            Size of batch for mini-batch gradient descent.
        momentum : bool
            Whether or not to include momentum optimization.

        Raises
        ------
        KeyError
            If `updater` is unsupported.
        """
        # Track max weight per step for diagnostics.
        self.weight_hist.append(self.weights.max())
        if updater == 'sgd':
            w_update = (l_rate * self.w_updates) + (m_factor * self.prev_w_updates)
            b_update = (l_rate * self.b_updates) + (m_factor * self.prev_b_updates)
            self.weights -= w_update
            self.back.biases -= b_update
            if momentum:
                # Velocity-style momentum: previous update is replaced.
                self.prev_w_updates = -w_update
                self.prev_b_updates = -b_update
        elif updater == 'mini_batch':
            # Accumulate updates until a batch boundary, then apply the mean.
            if self.forward_passes % batch_size != 0:
                try:
                    self.w_batch += self.w_updates
                    self.b_batch += self.b_updates
                except TypeError:
                    # First example of a batch: w_batch/b_batch are still None.
                    self.w_batch = self.w_updates
                    self.b_batch = self.b_updates
            else:
                w_update = (l_rate * (self.w_batch/batch_size)) + (m_factor * self.prev_w_updates)
                b_update = (l_rate * (self.b_batch/batch_size)) + (m_factor * self.prev_b_updates)
                self.weights -= w_update
                self.back.biases -= b_update
                if momentum:
                    # NOTE(review): here momentum ACCUMULATES (-=) whereas the
                    # 'sgd' branch REPLACES (= -w_update) — confirm which is
                    # intended; they behave very differently over time.
                    self.prev_w_updates -= w_update
                    self.prev_b_updates -= b_update
                # Seed next batch with the boundary example's updates.
                self.w_batch = self.w_updates
                self.b_batch = self.b_updates
        else:
            raise KeyError(f"Unrecognized updater: {updater}")
| 37.457429 | 131 | 0.627045 | 3.578125 |
393e0447e3fa7fdd02cccdf98e4027ae98ed8a66
| 7,632 |
py
|
Python
|
tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_2d.py
|
lanpa/chainer-tensorboard-example
|
33f0c6c1d414fd4e866179d77455ee8739e1f6d2
|
[
"MIT"
] | 2 |
2018-02-05T07:25:48.000Z
|
2018-08-28T20:29:45.000Z
|
tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_2d.py
|
lanpa/chainer-tensorboard-example
|
33f0c6c1d414fd4e866179d77455ee8739e1f6d2
|
[
"MIT"
] | null | null | null |
tests/chainer_tests/functions_tests/pooling_tests/test_max_pooling_2d.py
|
lanpa/chainer-tensorboard-example
|
33f0c6c1d414fd4e866179d77455ee8739e1f6d2
|
[
"MIT"
] | null | null | null |
import unittest
import mock
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(*testing.product({
    'cover_all': [True, False],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestMaxPooling2D(unittest.TestCase):
    """Forward/backward tests for F.max_pooling_2d with ksize=3, stride=2, pad=1,
    parameterized over cover_all and dtype."""

    def setUp(self):
        # Avoid instability of numerical gradient: use shuffled distinct
        # values scaled into (-1, 1) instead of random (possibly tied) values.
        self.x = numpy.arange(
            2 * 3 * 4 * 3, dtype=self.dtype).reshape(2, 3, 4, 3)
        numpy.random.shuffle(self.x)
        self.x = 2 * self.x / self.x.size - 1
        # Output height is 3 with cover_all (partial windows kept), else 2.
        if self.cover_all:
            self.gy = numpy.random.uniform(
                -1, 1, (2, 3, 3, 2)).astype(self.dtype)
        else:
            self.gy = numpy.random.uniform(
                -1, 1, (2, 3, 2, 2)).astype(self.dtype)

    def check_forward(self, x_data, use_cudnn='always'):
        x = chainer.Variable(x_data)
        with chainer.using_config('use_cudnn', use_cudnn):
            y = functions.max_pooling_2d(x, 3, stride=2, pad=1,
                                         cover_all=self.cover_all)
        self.assertEqual(y.data.dtype, self.dtype)
        y_data = cuda.to_cpu(y.data)
        self.assertEqual(self.gy.shape, y_data.shape)
        # Compare against a hand-computed max over each pooling window.
        for k in six.moves.range(2):
            for c in six.moves.range(3):
                x = self.x[k, c]
                if self.cover_all:
                    expect = numpy.array([
                        [x[0:2, 0:2].max(), x[0:2, 1:3].max()],
                        [x[1:4, 0:2].max(), x[1:4, 1:3].max()],
                        [x[3:4, 0:2].max(), x[3:4, 1:3].max()]])
                else:
                    expect = numpy.array([
                        [x[0:2, 0:2].max(), x[0:2, 1:3].max()],
                        [x[1:4, 0:2].max(), x[1:4, 1:3].max()]])
                testing.assert_allclose(expect, y_data[k, c])

    @condition.retry(3)
    def test_forward_cpu(self):
        self.check_forward(self.x)

    def test_forward_cpu_wide(self):  # see #120
        x_data = numpy.random.rand(2, 3, 15, 15).astype(self.dtype)
        x = chainer.Variable(x_data)
        functions.max_pooling_2d(x, 6, stride=6, pad=0)

    def test_forward_output_size_zero_cpu(self):
        # Degenerate inputs (1-pixel height/width) must fail the output-size
        # assertion rather than return an empty array.
        with six.assertRaisesRegex(
                self, AssertionError,
                'Height in the output should be positive.'):
            x_data = numpy.random.rand(4, 4, 1, 4).astype(self.dtype)
            x = chainer.Variable(x_data)
            functions.max_pooling_2d(x, 3, stride=2)
        with six.assertRaisesRegex(
                self, AssertionError,
                'Width in the output should be positive.'):
            x_data = numpy.random.rand(4, 4, 4, 1).astype(self.dtype)
            x = chainer.Variable(x_data)
            functions.max_pooling_2d(x, 3, stride=2)

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x))

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu_non_contiguous(self):
        # Fortran-ordered input exercises the non-contiguous code path.
        self.check_forward(cuda.cupy.asfortranarray(cuda.to_gpu(self.x)))

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu_no_cudnn(self):
        self.check_forward(cuda.to_gpu(self.x), 'never')

    @attr.gpu
    def test_forward_output_size_zero_gpu(self):
        with six.assertRaisesRegex(
                self, AssertionError,
                'Height in the output should be positive.'):
            x_data = cuda.cupy.random.rand(4, 4, 1, 4).astype(self.dtype)
            x = chainer.Variable(x_data)
            with chainer.using_config('use_cudnn', 'never'):
                functions.max_pooling_2d(x, 3, stride=2)
        with six.assertRaisesRegex(
                self, AssertionError,
                'Width in the output should be positive.'):
            x_data = cuda.cupy.random.rand(4, 4, 4, 1).astype(self.dtype)
            x = chainer.Variable(x_data)
            with chainer.using_config('use_cudnn', 'never'):
                functions.max_pooling_2d(x, 3, stride=2)

    @attr.cudnn
    def test_forward_output_size_zero_cudnn(self):
        with six.assertRaisesRegex(
                self, AssertionError,
                'Height in the output should be positive.'):
            x_data = cuda.cupy.random.rand(4, 4, 1, 4).astype(self.dtype)
            x = chainer.Variable(x_data)
            with chainer.using_config('use_cudnn', 'always'):
                functions.max_pooling_2d(x, 3, stride=2)
        with six.assertRaisesRegex(
                self, AssertionError,
                'Width in the output should be positive.'):
            x_data = cuda.cupy.random.rand(4, 4, 4, 1).astype(self.dtype)
            x = chainer.Variable(x_data)
            with chainer.using_config('use_cudnn', 'always'):
                functions.max_pooling_2d(x, 3, stride=2)

    def check_backward(self, x_data, y_grad, use_cudnn='always'):
        # dtype='d' upcasts for numerical gradient stability.
        with chainer.using_config('use_cudnn', use_cudnn):
            gradient_check.check_backward(
                functions.MaxPooling2D(
                    3, stride=2, pad=1, cover_all=self.cover_all),
                x_data, y_grad, dtype='d', atol=1e-4, rtol=1e-3)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.gy)

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_non_contiguous(self):
        self.check_backward(
            cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
            cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)))

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_no_cudnn(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')

    def test_backward_cpu_more_than_once(self):
        # backward must be safe to call repeatedly on the same function object.
        func = functions.MaxPooling2D(
            3, stride=2, pad=1, cover_all=self.cover_all)
        func(self.x)
        func.backward_cpu((self.x,), (self.gy,))
        func.backward_cpu((self.x,), (self.gy,))
@testing.parameterize(*testing.product({
    'use_cudnn': ['always', 'auto', 'never'],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestMaxPooling2DCudnnCall(unittest.TestCase):
    """Verifies that cuDNN pooling kernels are invoked exactly when the
    use_cudnn config says they should be (by mocking the cupy.cudnn calls)."""

    def setUp(self):
        self.x = cuda.cupy.arange(
            2 * 3 * 4 * 3, dtype=self.dtype).reshape(2, 3, 4, 3)
        self.gy = cuda.cupy.random.uniform(-1, 1,
                                           (2, 3, 2, 2)).astype(self.dtype)

    def forward(self):
        x = chainer.Variable(self.x)
        return functions.max_pooling_2d(
            x, 3, stride=2, pad=1, cover_all=False)

    def test_call_cudnn_forward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            with mock.patch('cupy.cudnn.cudnn.poolingForward') as func:
                self.forward()
                self.assertEqual(func.called,
                                 chainer.should_use_cudnn('>=auto'))

    def test_call_cudnn_backward(self):
        with chainer.using_config('use_cudnn', self.use_cudnn):
            expect = chainer.should_use_cudnn('>=auto')
            y = self.forward()
        # should be consistent to forward regardless of use_cudnn config
        y.grad = self.gy
        with mock.patch('cupy.cudnn.cudnn.poolingBackward') as func:
            y.backward()
            self.assertEqual(func.called, expect)


testing.run_module(__name__, __file__)
| 37.596059 | 79 | 0.587788 | 3.25 |
3d052e5a0477d31ad623d5afb140dc57645e2f28
| 3,086 |
swift
|
Swift
|
src/common/extensions/UIColor+Extensions.swift
|
danielruuth/Mpiddy-iOS
|
03187f08aaf73370013b7dca40570485db5fdfa0
|
[
"MIT"
] | 32 |
2019-03-08T18:36:27.000Z
|
2021-05-18T08:40:19.000Z
|
src/common/extensions/UIColor+Extensions.swift
|
danielruuth/Mpiddy-iOS
|
03187f08aaf73370013b7dca40570485db5fdfa0
|
[
"MIT"
] | 3 |
2019-11-04T20:41:08.000Z
|
2020-11-13T07:21:30.000Z
|
src/common/extensions/UIColor+Extensions.swift
|
danielruuth/Mpiddy-iOS
|
03187f08aaf73370013b7dca40570485db5fdfa0
|
[
"MIT"
] | 3 |
2019-09-10T12:38:14.000Z
|
2020-12-28T14:06:19.000Z
|
import UIKit
extension UIColor {
	// MARK: - Initializers
	/// Builds a color from a packed 0xRRGGBB integer and an explicit alpha.
	public convenience init(rgb: Int32, alpha: CGFloat) {
		let red = ((CGFloat)((rgb & 0xFF0000) >> 16)) / 255
		let green = ((CGFloat)((rgb & 0x00FF00) >> 8)) / 255
		let blue = ((CGFloat)(rgb & 0x0000FF)) / 255
		self.init(red: red, green: green, blue: blue, alpha: alpha)
	}

	/// Builds an opaque color from a packed 0xRRGGBB integer.
	public convenience init(rgb: Int32) {
		self.init(rgb: rgb, alpha: 1)
	}

	/// Returns the RGB-inverted color.
	/// NOTE(review): the source alpha is read but the result is always opaque
	/// (alpha: 1) — confirm this is intentional.
	func inverted() -> UIColor {
		var red: CGFloat = 0, green: CGFloat = 0, blue: CGFloat = 0, alpha: CGFloat = 0
		getRed(&red, green: &green, blue: &blue, alpha: &alpha)
		return UIColor(red: 1 - red, green: 1 - green, blue: 1 - blue, alpha: 1)
	}

	/// True when all RGB components are near 0 (black) or near 1 (white).
	func isBlackOrWhite() -> Bool {
		var red: CGFloat = 0, green: CGFloat = 0, blue: CGFloat = 0, alpha: CGFloat = 0
		getRed(&red, green: &green, blue: &blue, alpha: &alpha)
		if red > 0.91 && green > 0.91 && blue > 0.91 {
			return true // white
		}
		if red < 0.09 && green < 0.09 && blue < 0.09 {
			return true // black
		}
		return false
	}

	/// True when the Rec. 709 relative luminance is below 0.5.
	func isDark() -> Bool {
		var red: CGFloat = 0, green: CGFloat = 0, blue: CGFloat = 0, alpha: CGFloat = 0
		getRed(&red, green: &green, blue: &blue, alpha: &alpha)
		let lum = 0.2126 * red + 0.7152 * green + 0.0722 * blue
		if lum < 0.5 {
			return true
		}
		return false
	}

	/// Returns a color whose saturation is at least `minSaturation`
	/// (hue, brightness and alpha unchanged). Returns self when already
	/// saturated enough.
	func colorWithMinimumSaturation(_ minSaturation: CGFloat) -> UIColor {
		var hue: CGFloat = 0, sat: CGFloat = 0, val: CGFloat = 0, alpha: CGFloat = 0
		getHue(&hue, saturation: &sat, brightness: &val, alpha: &alpha)
		if sat < minSaturation {
			// Bug fix: the original rebuilt the color with the *current* `sat`,
			// returning a color identical to self and making this method a no-op.
			return UIColor(hue: hue, saturation: minSaturation, brightness: val, alpha: alpha)
		}
		return self
	}

	/// True when `compareColor` differs noticeably from self (component-wise
	/// threshold of 0.25), with a guard so that two near-gray colors are not
	/// reported as distinct.
	func isDistinct(fromColor compareColor: UIColor) -> Bool {
		var red1: CGFloat = 0, green1: CGFloat = 0, blue1: CGFloat = 0, alpha1: CGFloat = 0
		var red2: CGFloat = 0, green2: CGFloat = 0, blue2: CGFloat = 0, alpha2: CGFloat = 0
		getRed(&red1, green: &green1, blue: &blue1, alpha: &alpha1)
		compareColor.getRed(&red2, green: &green2, blue: &blue2, alpha: &alpha2)

		let threshold: CGFloat = 0.25
		if abs(red1 - red2) > threshold || abs(green1 - green2) > threshold || abs(blue1 - blue2) > threshold || abs(alpha1 - alpha2) > threshold {
			// check for grays, prevent multiple gray colors
			if abs(red1 - green1) < 0.03 && abs(red1 - blue1) < 0.03 {
				if abs(red2 - green2) < 0.03 && abs(red2 - blue2) < 0.03 {
					return false
				}
			}
			return true
		}
		return false
	}

	/// True when the luminance contrast ratio between the two colors
	/// exceeds 1.6 (WCAG-style formula with the 0.05 flare term).
	func isContrasted(fromColor color: UIColor) -> Bool {
		var red1: CGFloat = 0, green1: CGFloat = 0, blue1: CGFloat = 0, alpha1: CGFloat = 0
		var red2: CGFloat = 0, green2: CGFloat = 0, blue2: CGFloat = 0, alpha2: CGFloat = 0
		getRed(&red1, green: &green1, blue: &blue1, alpha: &alpha1)
		color.getRed(&red2, green: &green2, blue: &blue2, alpha: &alpha2)

		let lum1 = 0.2126 * red1 + 0.7152 * green1 + 0.0722 * blue1
		let lum2 = 0.2126 * red2 + 0.7152 * green2 + 0.0722 * blue2

		var contrast: CGFloat = 0
		if lum1 > lum2 {
			contrast = (lum1 + 0.05) / (lum2 + 0.05)
		} else {
			contrast = (lum2 + 0.05) / (lum1 + 0.05)
		}
		return contrast > 1.6
	}
}
| 30.86 | 141 | 0.622813 | 3.0625 |
3c906808120355e8bced14119cee94d8a8ddb171
| 1,591 |
ps1
|
PowerShell
|
Exchange/Get-ExchangeUserMailboxMismatch.ps1
|
bkeegan/PowershellBoilerplate2
|
3053fc60432fa7bd06225d6bcb4fa15ebc4ac34a
|
[
"MS-PL"
] | null | null | null |
Exchange/Get-ExchangeUserMailboxMismatch.ps1
|
bkeegan/PowershellBoilerplate2
|
3053fc60432fa7bd06225d6bcb4fa15ebc4ac34a
|
[
"MS-PL"
] | null | null | null |
Exchange/Get-ExchangeUserMailboxMismatch.ps1
|
bkeegan/PowershellBoilerplate2
|
3053fc60432fa7bd06225d6bcb4fa15ebc4ac34a
|
[
"MS-PL"
] | null | null | null |
<#
.SYNOPSIS
	Retrieves mailbox logon statistics and returns entries where the mailbox's username doesn't match the username of the user account connected to it.
.DESCRIPTION
	Retrieves mailbox logon statistics and returns entries where the mailbox's username doesn't match the username of the user account connected to it. A mismatch may indicate that another user is viewing email in someone else's mailbox.
.NOTES
File Name : Get-ExchangeUserMailboxMismatch.ps1
Author : Brenton keegan - [email protected]
Licenced under GPLv3
.LINK
https://github.com/bkeegan/PowershellBoilerplate2
License: http://www.gnu.org/copyleft/gpl.html
.EXAMPLE
Get-ExchangeUserMailboxMismatch -c "casServerName" -m "mailboxServerName"
.EXAMPLE
#>
#imports
import-module activedirectory
Function Get-ExchangeUserMailboxMismatch
{
	[cmdletbinding()]
	Param
	(
		[parameter(Mandatory=$true)]
		[alias("c")]
		[string]$casServer,

		[parameter(Mandatory=$true)]
		[alias("m")]
		[string]$mbServer
	)

	# Open a remote session to the Exchange CAS server and import its cmdlets.
	$session = New-PSSession -configurationname Microsoft.Exchange -connectionURI http://$casServer/PowerShell
	Import-PSSession $session

	# Non-cached logon sessions connected through this CAS server.
	$connectedExUsers = Get-LogonStatistics -server $mbServer| where {$_.ClientMode -ne "Cached" } | where {$_.ClientName -eq $casServer}
	Foreach($connectedExUser in $connectedExUsers)
	{
		# Bug fix: the original pattern "+.\\" is an invalid regex (the '+'
		# quantifier has nothing to repeat) and throws at runtime. The intent
		# is to strip the "DOMAIN\" prefix from the Windows2000Account value.
		$adUser = $connectedExUser.Windows2000Account -replace "^.*\\",""
		$adUser = get-aduser $adUser -property displayname
		if ($connectedExUser.Username -ne $adUser.displayname)
		{
			# Mailbox username differs from the connected account's display
			# name: emit the logon-statistics entry for review.
			$connectedExUser
		}
	}
}
| 28.410714 | 243 | 0.740415 | 3.15625 |
6aa0d7423f33aa0d8824dd48181a86544df92272
| 4,320 |
lua
|
Lua
|
Dusk/dusk_core/run/tileculling.lua
|
alextrevisan/Dusk-Engine
|
982c28a47a91544c21b232b2a12adee74f6bd314
|
[
"MIT"
] | null | null | null |
Dusk/dusk_core/run/tileculling.lua
|
alextrevisan/Dusk-Engine
|
982c28a47a91544c21b232b2a12adee74f6bd314
|
[
"MIT"
] | null | null | null |
Dusk/dusk_core/run/tileculling.lua
|
alextrevisan/Dusk-Engine
|
982c28a47a91544c21b232b2a12adee74f6bd314
|
[
"MIT"
] | 1 |
2019-10-25T04:40:04.000Z
|
2019-10-25T04:40:04.000Z
|
--------------------------------------------------------------------------------
--[[
	Dusk Engine Component: Tile Culling

	Manages displayed tiles for tile layers in a map.
--]]
--------------------------------------------------------------------------------

local tileculling = {}

--------------------------------------------------------------------------------
-- Localize
--------------------------------------------------------------------------------
local require = require

local editQueue = require("Dusk.dusk_core.misc.editqueue")
local screen = require("Dusk.dusk_core.misc.screen")

local newEditQueue = editQueue.new
local math_abs = math.abs  -- NOTE(review): unused in this chunk
local math_ceil = math.ceil

--------------------------------------------------------------------------------
-- Add Tile Culling to a Map
--------------------------------------------------------------------------------
function tileculling.addTileCulling(map)
	local divTileWidth, divTileHeight = 1 / map.data.tileWidth, 1 / map.data.tileHeight

	local culling = {
		layer = {}
	}

	------------------------------------------------------------------------------
	-- Load Layers
	------------------------------------------------------------------------------
	for layer, i in map.tileLayers() do
		if layer.tileCullingEnabled then
			-- prev/now hold the layer's visible tile bounds (left/right/top/bottom)
			-- from the previous and current frame.
			local layerCulling = {
				prev = {l = 0, r = 0, t = 0, b = 0},
				now = {l = 0, r = 0, t = 0, b = 0},
				update = function() end
			}

			local prev, now = layerCulling.prev, layerCulling.now

			local layerEdits = newEditQueue()
			layerEdits.setTarget(layer)

			--------------------------------------------------------------------------
			-- Update Culling
			--------------------------------------------------------------------------
			layerCulling.update = function()
				local nl, nr, nt, nb = layerCulling.updatePositions()
				local pl, pr, pt, pb = layerCulling.prev.l, layerCulling.prev.r, layerCulling.prev.t, layerCulling.prev.b

				-- Bounds unchanged: nothing to queue.
				if nl == pl and nr == pr and nt == pt and nb == pb then return end

				-- Difference between current positions and previous positions
				-- This is used to tell which direction the layer has moved
				local lDiff = nl - pl
				local rDiff = nr - pr
				local tDiff = nt - pt
				local bDiff = nb - pb

				-- Queue edits for each moved edge; "e" / "d" are edit-queue
				-- opcodes (presumably erase/draw — confirm against editqueue.lua).
				if lDiff > 0 then
					layerEdits.add(pl, nl, pt, pb, "e")
				elseif lDiff < 0 then
					layerEdits.add(pl, nl, nt, nb, "d")
				end

				if rDiff < 0 then
					layerEdits.add(pr, nr, pt, pb, "e")
				elseif rDiff > 0 then
					layerEdits.add(pr, nr, nt, nb, "d")
				end

				-- NOTE(review): both top-edge branches pass the same range
				-- (nl, nr, pt, now.t), unlike the left/right handling which
				-- switches between prev/now ranges — verify this is intended.
				if tDiff > 0 then
					layerEdits.add(nl, nr, pt, layerCulling.now.t, "e")
				elseif tDiff < 0 then
					layerEdits.add(nl, nr, pt, layerCulling.now.t, "d")
				end

				if bDiff < 0 then
					layerEdits.add(nl, nr, pb, layerCulling.now.b, "e")
				elseif bDiff > 0 then
					layerEdits.add(nl, nr, pb, layerCulling.now.b, "d")
				end

				-- Guard against tile "leaks": diagonal movement uncovers corner
				-- rectangles not covered by the edge-wise edits above.
				if lDiff > 0 and tDiff > 0 then
					layerEdits.add(pl, nl, pt, nt, "e")
				end

				if rDiff < 0 and tDiff > 0 then
					layerEdits.add(nr, pr, pt, nt, "e")
				end

				if lDiff > 0 and bDiff < 0 then
					layerEdits.add(pl, nl, nb, pb, "e")
				end

				if rDiff < 0 and bDiff < 0 then
					layerEdits.add(nr, pr, nb, pb, "e")
				end

				layerEdits.execute()
			end

			--------------------------------------------------------------------------
			-- Update Positions
			--------------------------------------------------------------------------
			layerCulling.updatePositions = function()
				-- Convert the screen rectangle into layer-local coordinates.
				local l, t = layer:contentToLocal(screen.left, screen.top)
				local r, b = layer:contentToLocal(screen.right, screen.bottom)

				-- Calculate left/right/top/bottom to the nearest tile
				-- We expand each position by one to hide the drawing and erasing
				l = math_ceil(l * divTileWidth) - 1
				r = math_ceil(r * divTileWidth) + 1
				t = math_ceil(t * divTileHeight) - 1
				b = math_ceil(b * divTileHeight) + 1

				-- Update previous position to be equal to current position
				prev.l = now.l
				prev.r = now.r
				prev.t = now.t
				prev.b = now.b

				-- Reset current position
				now.l = l
				now.r = r
				now.t = t
				now.b = b

				return l, r, t, b
			end

			layer._updateTileCulling = layerCulling.update
			culling.layer[i] = layerCulling
		end
	end

	return culling
end

return tileculling
| 29.189189 | 109 | 0.49537 | 3.328125 |
c954a197ec5f97098352b921198e3c0ef8e24d3a
| 2,530 |
asm
|
Assembly
|
libsrc/graphics/dfill2.asm
|
jpoikela/z88dk
|
7108b2d7e3a98a77de99b30c9a7c9199da9c75cb
|
[
"ClArtistic"
] | 38 |
2021-06-18T12:56:15.000Z
|
2022-03-12T20:38:40.000Z
|
libsrc/graphics/dfill2.asm
|
jpoikela/z88dk
|
7108b2d7e3a98a77de99b30c9a7c9199da9c75cb
|
[
"ClArtistic"
] | 2 |
2021-06-20T16:28:12.000Z
|
2021-11-17T21:33:56.000Z
|
libsrc/graphics/dfill2.asm
|
jpoikela/z88dk
|
7108b2d7e3a98a77de99b30c9a7c9199da9c75cb
|
[
"ClArtistic"
] | 6 |
2021-06-18T18:18:36.000Z
|
2021-12-22T08:01:32.000Z
|
; Z88DK Small C+ Graphics Functions
; Fills a screen area
; Original code by Massimo Morra (Thanks!)
; Ported by Stefano Bodrato
;
; Feb 2000 - Platform dependent stack usage
; Stack usage should be maxy*8 (512 bytes for the Z88)
;
; Since some platform (expecially the TI83) has very little stack space,
; we undersize it; this will cause a crash if a big area is filled.
;
; GENERIC VERSION
; IT DOESN'T MAKE USE OF ALTERNATE REGISTERS
; IT IS BASED ON "pointxy" and "plotpixel"
;
; $Id: dfill2.asm,v 1.4 2016-04-13 21:09:09 dom Exp $
;
; Algorithm overview: breadth-first flood fill using two alternating
; stack-allocated frontier buffers of (x, y) pairs, each terminated by a
; 255 stopper byte. cfill expands every point of the current frontier into
; its 4-neighbours, plotting new pixels and writing them into the next
; frontier buffer; do_fill loops until a pass produces no new pixels.

	INCLUDE	"graphics/grafix.inc"

	SECTION	code_graphics

	PUBLIC	do_fill

	EXTERN	pointxy
	EXTERN	plotpixel

;ix points to the table on stack (above)

;Entry:
;        d=x0   e=y0
.do_fill
	ld	hl,-maxy*3	; create buffer 1 on stack
	add	hl,sp		; The stack size depends on the display height.
	ld	sp,hl		; The worst case is when we paint a blank
	push	hl		; display starting from the center.
	pop	ix
	; Seed the first frontier with the start point and the 255 stopper.
	ld	(hl),d
	inc	hl
	ld	(hl),e
	inc	hl
	ld	(hl),255
	ld	hl,-maxy*3	; create buffer 2 on stack
	add	hl,sp
	ld	sp,hl
.loop	push	ix
	push	hl
	call	cfill
	pop	ix
	pop	hl
;.asave	ld	a,0
	;and	a
	; Z flag from cfill (via de) tells whether any pixel was plotted.
	push	de
	pop	af
;;	ex	af,af		; Restore the Z flag
;;	push	af
;;	ex	af,af
;;	pop	af
	jr	nz,loop
	ld	hl,maxy*6	; restore the stack pointer (parm*2)
	add	hl,sp
	ld	sp,hl
	ret

.cfill
	;sub	a,a		; Reset the Z flag
	;ex	af,af		; and save it
	xor	a
	push	af
	pop	de
	;ld	(asave+1),a
.next	ld	a,(ix+0)
	cp	255		; stopper ?
	ret	z		; return
	ld	b,a
	ld	c,(ix+1)
	push	bc
	; Neighbour above (y-1), unless already at the top edge.
	or	a
	jr	z,l1
	dec	b
	call	doplot
	pop	bc
	push	bc
.l1
	; Neighbour below (y+1), unless at the bottom edge.
	ld	a,b
	cp	maxy-1
	jr	z,l2
	inc	b
	call	doplot
	pop	bc
	push	bc
.l2
	; Neighbour left (x-1), unless at the left edge.
	ld	a,c
	or	a
	jr	z,l3
	dec	c
	call	doplot
.l3
	pop	bc
	; Neighbour right (x+1), unless at the right edge.
	ld	a,c
	cp	maxx-1
	jr	z,l4
	inc	c
	call	doplot
.l4
	inc	ix
	inc	ix
	jr	next

.doplot
	push	bc
	ld	(hl),255
	push	hl
	ld	l,b
	ld	h,c
	;call	pixeladdress	; bc must be saved by pixeladdress !
	push	de
	call	pointxy		; pixel already set? (Z clear when unset)
	pop	de
	pop	hl
	jr	z,dontret
	pop	af
	ret
.dontret
	or	b		; Z flag set...
;	or	1
;	and	a
	;ld	(asave+1),a
	push	af
	;pop	de
;	ld	(de),a
	push	hl
	ld	l,b
	ld	h,c
	call	plotpixel	; set the pixel and append (b,c) to next frontier
	pop	hl
	pop	de
	pop	bc
	ld	(hl),b
	inc	hl
	ld	(hl),c
	inc	hl
	ld	(hl),255
	;ex	af,af		; Save the Z flag
	xor	a
	ret

	SECTION	bss_graphics
; NOTE(review): spsave appears unused now that the commented-out save/restore
; code above is disabled.
.spsave	defw	0
| 13.825137 | 72 | 0.578656 | 3.125 |
e26cf19fabe9ce6e97506afa3ac0b51f3d4d1432
| 3,935 |
py
|
Python
|
examples/HD143006/common_functions.py
|
Jaye-yi/MPoL
|
eab782c275a48c5185de290cacdf12e68bbb66a5
|
[
"MIT"
] | 18 |
2020-12-18T18:25:42.000Z
|
2022-02-25T18:43:38.000Z
|
examples/HD143006/common_functions.py
|
Jaye-yi/MPoL
|
eab782c275a48c5185de290cacdf12e68bbb66a5
|
[
"MIT"
] | 49 |
2021-01-13T03:44:56.000Z
|
2021-10-05T21:43:14.000Z
|
examples/HD143006/common_functions.py
|
Jaye-yi/MPoL
|
eab782c275a48c5185de290cacdf12e68bbb66a5
|
[
"MIT"
] | 6 |
2021-05-09T21:54:55.000Z
|
2021-10-05T14:13:43.000Z
|
import numpy as np
import torch
from mpol import (
losses,
coordinates,
images,
precomposed,
gridding,
datasets,
connectors,
utils,
)
from astropy.utils.data import download_file
from ray import tune
import matplotlib.pyplot as plt
# We want to split these
# because otherwise the data loading routines will be rehashed several times.
def train(
model, dataset, optimizer, config, device, writer=None, report=False, logevery=50
):
"""
Args:
model: neural net model
dataset: to use to train against
optimizer: tied to model parameters and used to take a step
config: dictionary including epochs and hyperparameters.
device: "cpu" or "cuda"
writer: tensorboard writer object
"""
model = model.to(device)
model.train()
dataset = dataset.to(device)
residuals = connectors.GriddedResidualConnector(model.fcube, dataset)
residuals.to(device)
for iteration in range(config["epochs"]):
optimizer.zero_grad()
vis = model.forward()
sky_cube = model.icube.sky_cube
loss = (
losses.nll_gridded(vis, dataset)
+ config["lambda_sparsity"] * losses.sparsity(sky_cube)
+ config["lambda_TV"] * losses.TV_image(sky_cube)
+ config["entropy"] * losses.entropy(sky_cube, config["prior_intensity"])
)
if (iteration % logevery == 0) and writer is not None:
writer.add_scalar("loss", loss.item(), iteration)
writer.add_figure("image", log_figure(model, residuals), iteration)
loss.backward()
optimizer.step()
if report:
tune.report(loss=loss.item())
return loss.item()
def test(model, dataset, device):
model = model.to(device)
model.eval()
dataset = dataset.to(device)
vis = model.forward()
loss = losses.nll_gridded(vis, dataset)
return loss.item()
def cross_validate(model, config, device, k_fold_datasets, MODEL_PATH, writer=None):
test_scores = []
for k_fold, (train_dset, test_dset) in enumerate(k_fold_datasets):
# reset model
model.load_state_dict(torch.load(MODEL_PATH))
# create a new optimizer for this k_fold
optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])
# train for a while
train(model, train_dset, optimizer, config, device, writer=writer)
# evaluate the test metric
test_scores.append(test(model, test_dset, device))
# aggregate all test scores and sum to evaluate cross val metric
test_score = np.sum(np.array(test_scores))
# log to ray tune
tune.report(cv_score=test_score)
return test_score
def log_figure(model, residuals):
"""
Create a matplotlib figure showing the current image state.
Args:
model: neural net model
"""
# populate residual connector
residuals()
fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(10, 10))
im = ax[0, 0].imshow(
np.squeeze(model.icube.sky_cube.detach().cpu().numpy()),
origin="lower",
interpolation="none",
extent=model.icube.coords.img_ext,
)
plt.colorbar(im, ax=ax[0, 0])
im = ax[0, 1].imshow(
np.squeeze(residuals.sky_cube.detach().cpu().numpy()),
origin="lower",
interpolation="none",
extent=residuals.coords.img_ext,
)
plt.colorbar(im, ax=ax[0, 1])
im = ax[1, 0].imshow(
np.squeeze(torch.log(model.fcube.ground_amp.detach()).cpu().numpy()),
origin="lower",
interpolation="none",
extent=residuals.coords.vis_ext,
)
plt.colorbar(im, ax=ax[1, 0])
im = ax[1, 1].imshow(
np.squeeze(torch.log(residuals.ground_amp.detach()).cpu().numpy()),
origin="lower",
interpolation="none",
extent=residuals.coords.vis_ext,
)
plt.colorbar(im, ax=ax[1, 1])
return fig
| 27.137931 | 85 | 0.633545 | 3.234375 |
9fe893a9e587c5ede3d63f8e13b450018dc68f8a
| 749 |
py
|
Python
|
pseudogen/log.py
|
ansobolev/PseudoGenerator
|
6d94f4cd93411a963120ea218d26091d7ac46e8a
|
[
"MIT"
] | 1 |
2020-05-17T16:17:12.000Z
|
2020-05-17T16:17:12.000Z
|
pseudogen/log.py
|
ansobolev/PseudoGenerator
|
6d94f4cd93411a963120ea218d26091d7ac46e8a
|
[
"MIT"
] | null | null | null |
pseudogen/log.py
|
ansobolev/PseudoGenerator
|
6d94f4cd93411a963120ea218d26091d7ac46e8a
|
[
"MIT"
] | 1 |
2021-11-18T02:14:55.000Z
|
2021-11-18T02:14:55.000Z
|
import logging
loggers = {}
def get_logger(name, element):
global loggers
if loggers.get(name):
return loggers.get(name)
else:
# create logger
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
fh = logging.FileHandler(element + '/log.dat')
fh.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s: %(name)s - %(levelname)s - %(message)s')
# add formatter to fh
fh.setFormatter(formatter)
# add fh to logger
logger.addHandler(fh)
loggers[name] = logger
return logger
def interlog(logger):
logger.info('-----' * 10)
| 22.69697 | 92 | 0.604806 | 3.1875 |
2fcbbf1c381b14f439b0cbb7f5b2a904b4f9f437
| 2,622 |
py
|
Python
|
scrape.py
|
amitshankar97/basketball-reference-scraper
|
e2d22b79d06847afa6d12d5fd996c7ea575c2749
|
[
"MIT"
] | null | null | null |
scrape.py
|
amitshankar97/basketball-reference-scraper
|
e2d22b79d06847afa6d12d5fd996c7ea575c2749
|
[
"MIT"
] | 6 |
2021-03-18T21:39:32.000Z
|
2022-03-11T23:36:25.000Z
|
scrape.py
|
amitshankar97/basketball-reference-scraper
|
e2d22b79d06847afa6d12d5fd996c7ea575c2749
|
[
"MIT"
] | null | null | null |
import threading
from db import DB
BASE_URL = 'https://www.basketball-reference.com'
import requests
from bs4 import BeautifulSoup
import re
class Scraper (threading.Thread):
def __init__(self, threadID, letter):
threading.Thread.__init__(self)
self.threadID = threadID
self.letter = letter
def get_table(self, webpage, category):
# div for category
if category == 'totals':
stats_div = webpage.find('div', id=("div_totals_clone"))
else:
stats_div = webpage.find('div', id=("all_" + category))
# stats = stats_div.find_all('tr') # get all rows for category
stats = stats_div.find_all('tr', id=re.compile(category)) # get all rows for category
seasons = []
for statRow in stats:
season = {}
season['season'] = statRow.find('th', {"data-stat" : "season"}).text
stats = statRow.find_all('td')
for stat in stats:
statName = stat.attrs['data-stat']
not_integers = ['team_id', 'lg_id', 'pos']
if statName in not_integers or stat.text == '':
statValue = stat.text
else:
statValue = float(stat.text)
season[statName] = statValue
seasons.append(season)
return seasons
def get_html(self, url):
page = requests.get(url)
html = BeautifulSoup(page.text, 'lxml')
return html
# Scrape players by letter
def run(self):
link = BASE_URL + '/players/' + self.letter
html = self.get_html(link)
if html:
playersDiv = html.find('div', id='all_players')
if(playersDiv == None):
return []
links = playersDiv.find_all('a')
players = []
for link in links:
player = {}
playerLink = link.get('href')
if not playerLink.startswith('/players'):
continue
player['name'] = link.text
player['url'] = BASE_URL + playerLink
webpage = self.get_html(player['url'])
player['per_game'] = self.get_table(webpage, 'per_game')
# player['totals'] = get_table(webpage, 'totals')
# player['playoffs_per_game'] = get_table(webpage, 'playoffs_per_game')
mongo = DB()
success = mongo.addOrUpdatePlayer(player=player)
# return players
print(self.threadID + " exited.")
| 30.137931 | 93 | 0.532799 | 3.125 |
cdfab437767c454e83a6f74088b4a3f6c49619aa
| 5,004 |
cs
|
C#
|
UnitTest1.cs
|
utilitydelta/dotnet-brotli-tester
|
5f817b49b1c000a3ccbad0e2afd3170b1dfb8ae7
|
[
"MIT"
] | null | null | null |
UnitTest1.cs
|
utilitydelta/dotnet-brotli-tester
|
5f817b49b1c000a3ccbad0e2afd3170b1dfb8ae7
|
[
"MIT"
] | null | null | null |
UnitTest1.cs
|
utilitydelta/dotnet-brotli-tester
|
5f817b49b1c000a3ccbad0e2afd3170b1dfb8ae7
|
[
"MIT"
] | null | null | null |
using System;
using System.Collections.Generic;
using System.IO;
using System.IO.Compression;
using System.Reflection;
using Xunit;
namespace DotnetCoreBrotliTester
{
public class UnitTest1
{
private static Stream ExtractResource(string filename)
{
var a = Assembly.GetExecutingAssembly();
return a.GetManifestResourceStream(filename);
}
[Theory]
[InlineData(50000)]
[InlineData(500000)]
[InlineData(5000000)]
public void TestBrotliChunkDecompressOk(int chunkSize)
{
var origStream = ExtractResource("DotnetCoreBrotliTester.image.jpg");
var compressedStream = new MemoryStream();
var compressedBoundaries = new List<(long start, long end)>();
while (origStream.Position < origStream.Length)
{
var size = origStream.Length - origStream.Position;
if (size > chunkSize) size = chunkSize;
var bytes = new byte[size];
origStream.Read(bytes, 0, bytes.Length);
var startPos = compressedStream.Position;
using (var compressor = new BrotliStream(compressedStream, CompressionMode.Compress, true))
{
compressor.Write(bytes, 0, bytes.Length);
}
var endPos = compressedStream.Position;
compressedBoundaries.Add((startPos, endPos));
}
var decompressedOriginal = new MemoryStream();
compressedStream.Position = 0;
foreach (var compressedBoundary in compressedBoundaries)
{
var compressedBytes = new byte[compressedBoundary.end-compressedBoundary.start];
compressedStream.Read(compressedBytes, 0, compressedBytes.Length);
using (var chunk = new MemoryStream(compressedBytes))
using (var decompressor = new BrotliStream(chunk, CompressionMode.Decompress, true))
{
decompressor.CopyTo(decompressedOriginal);
}
}
decompressedOriginal.Position = 0;
origStream.Position = 0;
using (var fileTest = File.OpenWrite($"brotli-working-image-{chunkSize}.jpg"))
{
decompressedOriginal.CopyTo(fileTest);
}
decompressedOriginal.Position = 0;
Assert.True(CompareStreams(origStream, decompressedOriginal));
}
[Theory]
[InlineData(50000)]
[InlineData(500000)]
[InlineData(5000000)]
public void TestBrotliNoChunkDecompressFail(int chunkSize)
{
var origStream = ExtractResource("DotnetCoreBrotliTester.image.jpg");
var compressedStream = new MemoryStream();
while (origStream.Position < origStream.Length)
{
var size = origStream.Length - origStream.Position;
if (size > chunkSize) size = chunkSize;
var bytes = new byte[size];
origStream.Read(bytes, 0, bytes.Length);
using (var compressor = new BrotliStream(compressedStream, CompressionMode.Compress, true))
{
compressor.Write(bytes, 0, bytes.Length);
}
}
var decompressedOriginal = new MemoryStream();
compressedStream.Position = 0;
using (var decompressor = new BrotliStream(compressedStream, CompressionMode.Decompress, true))
{
decompressor.CopyTo(decompressedOriginal);
}
decompressedOriginal.Position = 0;
origStream.Position = 0;
using (var fileTest = File.OpenWrite($"brotli-fail-image-{chunkSize}.jpg"))
{
decompressedOriginal.CopyTo(fileTest);
}
decompressedOriginal.Position = 0;
if (chunkSize <= 500000)
{
Assert.False(CompareStreams(origStream, decompressedOriginal));
}
else
{
Assert.True(CompareStreams(origStream, decompressedOriginal));
}
}
private bool CompareStreams(Stream a, Stream b)
{
if (a == null &&
b == null)
return true;
if (a == null ||
b == null)
{
throw new ArgumentNullException(
a == null ? "a" : "b");
}
if (a.Length < b.Length)
return false;
if (a.Length > b.Length)
return false;
for (int i = 0; i < a.Length; i++)
{
int aByte = a.ReadByte();
int bByte = b.ReadByte();
if (aByte.CompareTo(bByte) != 0)
return false;
}
return true;
}
}
}
| 32.921053 | 107 | 0.540368 | 3.015625 |
9fe28045cd5e97868df36d08bc6a7e65ac0c5dfc
| 680 |
py
|
Python
|
factorial.py
|
fionanealon/python-exercises
|
f78ebd4ccd5504bf6c6cc07992ec5a72f6a02914
|
[
"Apache-2.0"
] | null | null | null |
factorial.py
|
fionanealon/python-exercises
|
f78ebd4ccd5504bf6c6cc07992ec5a72f6a02914
|
[
"Apache-2.0"
] | null | null | null |
factorial.py
|
fionanealon/python-exercises
|
f78ebd4ccd5504bf6c6cc07992ec5a72f6a02914
|
[
"Apache-2.0"
] | null | null | null |
# Fiona Nealon, 2018-04-07
# A function called factorial() which takes a single input and returns it's factorial
def factorial(upto):
# Create a variable that will become the answer
multupto = 1
# Loop through numbers i from 1 to upto
for i in range(1, upto + 1):
# Multiply ans by i, changing ans to that
multupto = multupto * i
# Return the factorial
return multupto
# Tests from questions
print("The multiplication of the values from to 1 to 5 inclusive is", factorial(5))
print("The multiplication of the values from to 1 to 7 inclusive is", factorial(7))
print("The multiplication of the values from to 1 to 10 inclusive is", factorial(10))
| 37.777778 | 87 | 0.719118 | 3.375 |
46dc330cd27e5dc9662eb6e7d50fb7953d1866a0
| 826 |
dart
|
Dart
|
test/bitcount_test.dart
|
akhomchenko/bitcount
|
c19a3c36cc7aa0c33e7f72043a6fc7f5de3b799e
|
[
"MIT"
] | 1 |
2022-03-06T15:18:01.000Z
|
2022-03-06T15:18:01.000Z
|
test/bitcount_test.dart
|
gagoman/bitcount
|
c19a3c36cc7aa0c33e7f72043a6fc7f5de3b799e
|
[
"MIT"
] | 4 |
2021-03-10T08:38:29.000Z
|
2021-11-16T21:23:16.000Z
|
test/bitcount_test.dart
|
gagoman/bitcount
|
c19a3c36cc7aa0c33e7f72043a6fc7f5de3b799e
|
[
"MIT"
] | 1 |
2021-03-10T09:08:46.000Z
|
2021-03-10T09:08:46.000Z
|
import 'dart:math';
import 'package:bitcount/bitcount.dart';
import 'package:test/test.dart';
void main() {
group('.bitCount()', () {
void checkBitCount(int number, int expectedCount) {
test('$expectedCount for $number', () {
expect(number.bitCount(), expectedCount);
});
}
group('returns', () {
for (var shift = 0; shift < 53; shift += 1) {
checkBitCount(_shiftLeft(1, shift), 1);
checkBitCount(_shiftLeft(1, shift) - 1, shift);
}
});
group('returns', () {
for (var shift = 53; shift < 64; shift += 1) {
checkBitCount(_shiftLeft(1, shift), 1);
checkBitCount(_shiftLeft(1, shift) - 1, shift);
}
}, testOn: '!js'); // see .bitCount() docs
});
}
int _shiftLeft(int n, int shift) {
return n * pow(2, shift).toInt();
}
| 25.030303 | 55 | 0.566586 | 3.109375 |
7ac5bb10464de846362f9f983f32d1ce4fbeb060
| 1,833 |
cs
|
C#
|
Assets/scripts/enemyAI.cs
|
southrad69/Atividade1
|
f9f649d7fb59b80b152983970fbda9ca40944319
|
[
"Apache-2.0"
] | null | null | null |
Assets/scripts/enemyAI.cs
|
southrad69/Atividade1
|
f9f649d7fb59b80b152983970fbda9ca40944319
|
[
"Apache-2.0"
] | null | null | null |
Assets/scripts/enemyAI.cs
|
southrad69/Atividade1
|
f9f649d7fb59b80b152983970fbda9ca40944319
|
[
"Apache-2.0"
] | null | null | null |
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class enemyAI : MonoBehaviour
{
public ParticleSystem fire;
public bool enableshoot = false;
int index = 0;
float rotation = 0;
float patrolSpeed = 150;
public float radius = 10;
GameObject target;
public GameObject[] waypoints;
void Start()
{
target = GameObject.FindGameObjectWithTag("Player");
}
void Update()
{
fire.Stop();
float dist = Vector3.Distance(target.transform.position, transform.position);
if (enableshoot == false){
if (rotation == 360){
rotation = 0;
}
rotation = rotation + radius * Time.deltaTime;
gameObject.transform.rotation = Quaternion.Euler(0, rotation, 0);
gameObject.transform.Translate(0, 0, patrolSpeed * Time.deltaTime);
print("Moving at patrol speed");
}
if (enableshoot)
{
fire.Emit(1);
transform.LookAt(target.transform.position + target.transform.forward * dist * 0.5f);
if (dist <= 30)
{
transform.position = Vector3.MoveTowards(transform.position, waypoints[index].transform.position, Time.deltaTime * 20);
print("Decreasing speed");
}
else
{
transform.position = Vector3.MoveTowards(transform.position, waypoints[index].transform.position, Time.deltaTime * 80);
print("Moving at engagement speed");
}
}
}
private void OnTriggerEnter(Collider other)
{
enableshoot = true;
}
private void OnTriggerExit(Collider other)
{
fire.Stop();
enableshoot = false;
}
}
| 24.118421 | 135 | 0.57174 | 3.140625 |
2fe0baf495e13449bb86e81600805580f4d2c6d0
| 856 |
py
|
Python
|
exercises/pressing_button.py
|
polde-live/interprog1
|
a49ecef14453839518f1e8a6551fb3af493b1c2c
|
[
"Unlicense"
] | null | null | null |
exercises/pressing_button.py
|
polde-live/interprog1
|
a49ecef14453839518f1e8a6551fb3af493b1c2c
|
[
"Unlicense"
] | null | null | null |
exercises/pressing_button.py
|
polde-live/interprog1
|
a49ecef14453839518f1e8a6551fb3af493b1c2c
|
[
"Unlicense"
] | null | null | null |
"""
An Introduction to Interactive Programming in Python (Part 1)
Practice exercises for timers # 5.
Reflex tester
"""
import simpleguitk as simplegui
total_ticks = 0
first_click = True
# Timer handler
def tick():
global total_ticks
total_ticks += 1
def measure_time():
time_elapsed = total_ticks / 100.0
print "Elapsed time:\t %g seconds" %time_elapsed
# Button handler
def click():
global first_click
global total_ticks
if first_click:
total_ticks = 0
print "Starting measurment."
else:
measure_time()
first_click = not first_click
# Create frame and timer
frame = simplegui.create_frame("Counter with buttons", 200, 200)
frame.add_button("Click me", click, 200)
timer = simplegui.create_timer(10, tick)
# Start timer
timer.start()
frame.start()
| 19.454545 | 64 | 0.674065 | 3.078125 |
8080ada46ff72a4584b808dcf9312c0fc262517c
| 2,134 |
lua
|
Lua
|
lua/enemies/bossShot.lua
|
nekolabs/bob-the-bacterium
|
27e8fde7b71cc3e3757d8779bf626be39ed5c224
|
[
"MIT"
] | 1 |
2016-08-28T23:00:53.000Z
|
2016-08-28T23:00:53.000Z
|
lua/enemies/bossShot.lua
|
nekolabs/bob-the-bacterium
|
27e8fde7b71cc3e3757d8779bf626be39ed5c224
|
[
"MIT"
] | null | null | null |
lua/enemies/bossShot.lua
|
nekolabs/bob-the-bacterium
|
27e8fde7b71cc3e3757d8779bf626be39ed5c224
|
[
"MIT"
] | null | null | null |
-- bossShot.lua
--atlMap = atl.Loader.load("Maps/level0.tmx")
local lg = love.graphics
local bulletImg = lg.newImage("assets/bullet.png")--not the actual bullet, but important for the sizes
local buW, buH = bulletImg:getWidth(), bulletImg:getHeight()
local shotMusic = love.audio.newSource("sfx/laser.ogg"); -- sfx for lasers of miniBoss
shotMusic:setLooping(false);
bossShot2 = false
bossBullet = {}
function createBossBullet(x,y, BbulletDir)
table.insert(bossBullet, { x=x, y=y, width=buW-2.5, height=buH-2.5, BbulletDir = BbulletDir, vxBbu = 200 } )
shotMusic:play();
shotMusic:setLooping(false);
end
function bossBullet_update(dt)
-- we need to split it or after the turret is dead the bullets won't move further
for Bbi,Bbv in ipairs(bossBullet) do
if Bbv.BbulletDir == "clockwise" then
-- movement of the bullet to the right with velocity of Bu (overriding xPos(bu))
Bbv.x = Bbv.x + (Bbv.vxBbu*dt)
elseif Bbv.BbulletDir == "anticlockwise" then
Bbv.x = Bbv.x - (Bbv.vxBbu*dt)
end
end
for Bbi,Bbv in ipairs(bossBullet) do
for ei,ev in ipairs(RedBoss) do
if Bbv.BbulletDir == "clockwise" then
if Bbv.x > atlMap.width*atlMap.tileWidth + (-1)*buW then
for i=1,1 do
table.remove(bossBullet, Bbi)
end
elseif Bbv.x > ev.x + 750 then
for i=1,1 do
table.remove(bossBullet, Bbi)
end
end
end
if Bbv.BbulletDir == "anticlockwise" then
if Bbv.x < ev.x - (750 + buW) then
for i=1,1 do
table.remove(bossBullet,Bbi)
end
elseif Bbv.x < (atlMap.width*atlMap.tileWidth-atlMap.width*atlMap.tileWidth) + buW then
for i=1,1 do
table.remove(bossBullet,Bbi)
end
end
end
end
end
end
local r,g,b = 255,255,255
local wRect, hRect = 20,5
function bossBullet_draw()
for Bbi,Bbv in ipairs(bossBullet) do
lg.setColor(r-255,g-255,b); -- blue lasers
if Bbv.BbulletDir == "clockwise" then
lg.rectangle("fill", Bbv.x+Bbv.width-40, Bbv.y+25, wRect, hRect);
elseif Bbv.BbulletDir == "anticlockwise" then
lg.rectangle("fill", Bbv.x-15, Bbv.y+25, wRect, hRect);
end
lg.setColor(r,g,b);
end
end
| 31.382353 | 109 | 0.675726 | 3.125 |
40691adab4ea63804807a6c1c1dda7e4c067c85f
| 3,474 |
ts
|
TypeScript
|
src/ThunkTestRunner.ts
|
churchcommunitybuilder/redux-thunk-testing-library
|
b9ee3d266b37ba7c9f48c1023076d01637f0f009
|
[
"MIT"
] | null | null | null |
src/ThunkTestRunner.ts
|
churchcommunitybuilder/redux-thunk-testing-library
|
b9ee3d266b37ba7c9f48c1023076d01637f0f009
|
[
"MIT"
] | 5 |
2020-09-06T16:19:13.000Z
|
2022-03-07T23:16:15.000Z
|
src/ThunkTestRunner.ts
|
churchcommunitybuilder/redux-thunk-testing-library
|
b9ee3d266b37ba7c9f48c1023076d01637f0f009
|
[
"MIT"
] | null | null | null |
import { mockReturnValue } from './mockReturnValue'
import {
Expectation,
MockImplementation,
MockReturns,
MockReturnsOrImplementation,
Thunk as DefaultThunk,
} from './types'
import { AnyAction, Store } from 'redux'
const isMockReturns = (
mock: MockReturnsOrImplementation,
): mock is MockReturns => typeof mock !== 'function'
const isMockImplementation = (
mock: MockReturnsOrImplementation,
): mock is MockImplementation => typeof mock === 'function'
export class ThunkTestRunner<Thunk extends DefaultThunk, ExtraArg extends any> {
private thunk: Thunk
private expectations: ([Expectation<ExtraArg>, boolean])[] = []
protected isNegated = false
protected store: Store
protected dispatch: jest.Mock
protected getState: jest.Mock
protected extraArg: ExtraArg
get not() {
this.isNegated = true
return this
}
constructor(thunk: Thunk, store: Store, extraArg?: ExtraArg) {
this.extraArg = extraArg
this.store = store
this.thunk = thunk
this.dispatch = jest.fn()
this.getState = jest.fn(this.store.getState)
}
protected mockDependency<
M extends MockReturns | MockImplementation,
F extends jest.Mock
>(mock: M, mockFn: F) {
if (isMockReturns(mock)) {
mockReturnValue(mockFn, mock)
} else if (isMockImplementation(mock)) {
mock(mockFn)
}
return this
}
protected addExpectation(expectation: Expectation<ExtraArg>) {
this.expectations = [...this.expectations, [expectation, this.isNegated]]
this.isNegated = false
return this
}
withDispatch(mockDispatch: MockReturnsOrImplementation) {
return this.mockDependency(mockDispatch, this.dispatch)
}
withActions(...actions: any) {
actions.forEach(action => this.store.dispatch(action))
return this
}
toDispatch(...action: any[]) {
return this.addExpectation(({ dispatch, isNegated }) => {
if (action.length > 0) {
this.getExpectation(dispatch, isNegated).toHaveBeenCalledWith(...action)
} else {
this.getExpectation(dispatch, isNegated).toHaveBeenCalled()
}
})
}
toDispatchActionType(actionCreator: (...args: any[]) => AnyAction) {
return this.addExpectation(({ dispatch, isNegated }) => {
this.getExpectation(dispatch, isNegated).toHaveBeenCalledWith(
expect.objectContaining({ type: actionCreator().type }),
)
})
}
toReturn(expectedOutput: any, strictEqualityCheck = false) {
return this.addExpectation(({ output, isNegated }) => {
const expectation = this.getExpectation(output, isNegated)
if (strictEqualityCheck) {
expectation.toBe(expectedOutput)
} else {
expectation.toEqual(expectedOutput)
}
})
}
toMeetExpectation(expectation: Expectation<ExtraArg>) {
return this.addExpectation(expectation)
}
protected getExpectation(value: any, isNegated = false) {
const expectation = expect(value)
if (isNegated) {
return expectation.not
}
return expectation
}
async run() {
const output = await this.thunk(this.dispatch, this.getState, this.extraArg)
this.expectations.forEach(([expectation, isNegated]) => {
expectation({
dispatch: this.dispatch,
getState: this.getState,
extraArg: this.extraArg,
isNegated,
output,
})
})
return {
dispatch: this.dispatch,
state: this.getState(),
extraArg: this.extraArg,
}
}
}
| 25.357664 | 80 | 0.672712 | 3.0625 |
af95830f1326e9be794403d0df1e6338caff21ac
| 1,790 |
py
|
Python
|
app.py
|
sramako/sigmaBackend
|
7941c915fe95c8a1efc7f357dc020def88de5910
|
[
"MIT"
] | null | null | null |
app.py
|
sramako/sigmaBackend
|
7941c915fe95c8a1efc7f357dc020def88de5910
|
[
"MIT"
] | null | null | null |
app.py
|
sramako/sigmaBackend
|
7941c915fe95c8a1efc7f357dc020def88de5910
|
[
"MIT"
] | null | null | null |
import os
from flask import Flask
from flask import request
import json
from flask_cors import CORS, cross_origin
import pymongo
# import math
import sys
app = Flask(__name__)
myclient = pymongo.MongoClient("mongodb://ako:[email protected]:49365/sigma")
mydb = myclient['sigma']
@app.route('/')
def hello():
return 'Ako: All endponts are live.'
@app.route('/courses')
def courses():
mycol = mydb["courses"]
data = dict()
count = 0
for course in mycol.find({},{'_id':0}):
data[count] = course
count+=1
return json.dumps(data)
@app.route('/coursesize')
def coursesize():
mycol = mydb["courses"]
count = 0
for course in mycol.find({},{'_id':0}):
count+=1
return str(count)
@app.route('/coursetext')
def coursetext():
index = int(request.args.get('index'))
mycol = mydb["courses"]
data = "<p>"
count = 0
for course in mycol.find({},{'_id':0}):
count+=1
if(count==index):
for subject in course['subjects']:
line = subject['subject']+' : '+subject['faculty']
data += line
data += '<br>'
data += ','.join(course['branch'])+' - '+','.join(course['standard'])
data += '<br>'
data += '</p>'
return data
@app.route('/coursename')
def coursename():
index = int(request.args.get('index'))
mycol = mydb["courses"]
data = ""
count = 0
for course in mycol.find({},{'_id':0}):
count+=1
if(count==index):
data += course['name']
return data
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
CORS(app, resources=r'/*')
# app.config['CORS_HEADERS'] = 'Content-Type'
app.run(host='0.0.0.0', port=port,debug=True)
# app.run()
| 24.520548 | 86 | 0.574302 | 3.265625 |
95613191ba386ae6d96439d7da80d57696f97b28
| 473 |
sql
|
SQL
|
DB-Basics-MSSQL-Server/Exams/(Demo)Databases MSSQL Server Exam - 10 Feb 2019/15.SelectTheLessPopularJob.sql
|
emilia98/SoftUni-CSharp-Db
|
c03f09fa9fa2f199c6adc7548dacc85fd3024d0b
|
[
"MIT"
] | null | null | null |
DB-Basics-MSSQL-Server/Exams/(Demo)Databases MSSQL Server Exam - 10 Feb 2019/15.SelectTheLessPopularJob.sql
|
emilia98/SoftUni-CSharp-Db
|
c03f09fa9fa2f199c6adc7548dacc85fd3024d0b
|
[
"MIT"
] | null | null | null |
DB-Basics-MSSQL-Server/Exams/(Demo)Databases MSSQL Server Exam - 10 Feb 2019/15.SelectTheLessPopularJob.sql
|
emilia98/SoftUni-CSharp-Db
|
c03f09fa9fa2f199c6adc7548dacc85fd3024d0b
|
[
"MIT"
] | null | null | null |
USE "ColonialJourney";
/*
Extract from the database the less popular job in the longest journey.
In other words, the job with less assign colonists.
*/
SELECT TOP(1)
res.Id,
t.JobDuringJourney AS [Job Name]
FROM
(
SELECT TOP(1)
j.Id
FROM
"Journeys" AS j
ORDER BY
(j.JourneyEnd - j.JourneyStart) DESC
) AS res
INNER JOIN
TravelCards AS t
ON
res.Id = t.JourneyId
GROUP BY
res.Id,
t.JobDuringJourney
ORDER BY
COUNT(t.JobDuringJourney);
| 16.310345 | 72 | 0.693446 | 3.140625 |
463bbba8a4824eab578780ca35d7382e51c85940
| 848 |
php
|
PHP
|
wstmart/home/controller/Orderrefunds.php
|
Elmwoods/PWareHouse
|
2a266c1a9c961b4088e4272bdad158a3ff71430a
|
[
"Apache-2.0"
] | null | null | null |
wstmart/home/controller/Orderrefunds.php
|
Elmwoods/PWareHouse
|
2a266c1a9c961b4088e4272bdad158a3ff71430a
|
[
"Apache-2.0"
] | null | null | null |
wstmart/home/controller/Orderrefunds.php
|
Elmwoods/PWareHouse
|
2a266c1a9c961b4088e4272bdad158a3ff71430a
|
[
"Apache-2.0"
] | null | null | null |
<?php
namespace wstmart\home\controller;
use wstmart\common\model\OrderRefunds as M;
/**
* ============================================================================
* WSTMart多用户商城
* 版权所有 2016-2066 广州商淘信息科技有限公司,并保留所有权利。
* 官网地址:http://www.wstmart.net
* 交流社区:http://bbs.shangtaosoft.com
* 联系QQ:153289970
* ----------------------------------------------------------------------------
* 这不是一个自由软件!未经本公司授权您只能在不用于商业目的的前提下对程序代码进行修改和使用;
* 不允许对程序代码以任何形式任何目的的再发布。
* ============================================================================
* 订单退款控制器
*/
class Orderrefunds extends Base{
/**
* 用户申请退款
*/
public function refund(){
$m = new M();
$rs = $m->refund();
return $rs;
}
/**
* 商家处理是否同意
*/
public function shopRefund(){
$m = new M();
$rs = $m->shopRefund();
return $rs;
}
}
| 24.228571 | 80 | 0.444575 | 3.109375 |
fa7fdf0d58c70195d34d1ca216b0ca4b0fbdec95
| 2,033 |
cpp
|
C++
|
Competitive Programming/Bit Manipulation/Divide two integers without using multiplication, division and mod operator.cpp
|
shreejitverma/GeeksforGeeks
|
d7bcb166369fffa9a031a258e925b6aff8d44e6c
|
[
"MIT"
] | 2 |
2022-02-18T05:14:28.000Z
|
2022-03-08T07:00:08.000Z
|
Competitive Programming/Bit Manipulation/Divide two integers without using multiplication, division and mod operator.cpp
|
shivaniverma1/Competitive-Programming-1
|
d7bcb166369fffa9a031a258e925b6aff8d44e6c
|
[
"MIT"
] | 6 |
2022-01-13T04:31:04.000Z
|
2022-03-12T01:06:16.000Z
|
Competitive Programming/Bit Manipulation/Divide two integers without using multiplication, division and mod operator.cpp
|
shivaniverma1/Competitive-Programming-1
|
d7bcb166369fffa9a031a258e925b6aff8d44e6c
|
[
"MIT"
] | 2 |
2022-02-14T19:53:53.000Z
|
2022-02-18T05:14:30.000Z
|
/*
https://www.geeksforgeeks.org/divide-two-integers-without-using-multiplication-division-mod-operator/
Divide two integers without using multiplication, division and mod operator
Difficulty Level : Medium
Last Updated : 03 Sep, 2021
Geek Week
Given a two integers say a and b. Find the quotient after dividing a by b without using multiplication, division and mod operator.
Example:
Input : a = 10, b = 3
Output : 3
Input : a = 43, b = -8
Output : -5
Recommended: Please try your approach on {IDE} first, before moving on to the solution.
Approach : Keep subtracting the dividend from the divisor until dividend becomes less than divisor.
The dividend becomes the remainder, and the number of times subtraction is done becomes the quotient.
*/
// C++ implementation to Divide two
// integers without using multiplication,
// division and mod operator
#include <bits/stdc++.h>
using namespace std;
// Function to divide a by b and
// return floor value it
int divide(long long dividend, long long divisor)
{
// Calculate sign of divisor i.e.,
// sign will be negative only iff
// either one of them is negative
// otherwise it will be positive
int sign = ((dividend < 0) ^
(divisor < 0))
? -1
: 1;
// remove sign of operands
dividend = abs(dividend);
divisor = abs(divisor);
// Initialize the quotient
long long quotient = 0, temp = 0;
// test down from the highest bit and
// accumulate the tentative value for
// valid bit
for (int i = 31; i >= 0; --i)
{
if (temp + (divisor << i) <= dividend)
{
temp += divisor << i;
quotient |= 1LL << i;
}
}
//if the sign value computed earlier is -1 then negate the value of quotient
if (sign == -1)
quotient = -quotient;
return quotient;
}
// Driver code
int main()
{
int a = 10, b = 3;
cout << divide(a, b) << "\n";
a = 43, b = -8;
cout << divide(a, b);
return 0;
}
| 26.064103 | 130 | 0.632071 | 3.625 |
bb886641a83d2cf7aeacf8e437b9e0b880a06f1f
| 1,235 |
cs
|
C#
|
Assets/Scripts/ObjectPooler.cs
|
vikkymaurya/Object-Pooling
|
3c3a0713f37145b45c852710151d5a2504196436
|
[
"MIT"
] | null | null | null |
Assets/Scripts/ObjectPooler.cs
|
vikkymaurya/Object-Pooling
|
3c3a0713f37145b45c852710151d5a2504196436
|
[
"MIT"
] | null | null | null |
Assets/Scripts/ObjectPooler.cs
|
vikkymaurya/Object-Pooling
|
3c3a0713f37145b45c852710151d5a2504196436
|
[
"MIT"
] | null | null | null |
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class ObjectPooler : MonoBehaviour
{
public static ObjectPooler Instance;
public GameObject prefab;
public float poolAmount;
List<GameObject> bulletPools;
bool newObject = true;
private void Awake()
{
if(Instance == null)
{
Instance = this;
}else if(Instance !=this)
{
Destroy(gameObject);
}
}
void Start()
{
bulletPools = new List<GameObject>();
for (int i = 0; i < poolAmount; i++)
{
GameObject temBullet = Instantiate(prefab) as GameObject;
temBullet.SetActive(false);
bulletPools.Add(temBullet);
}
}
public GameObject ObjectFromPool()
{
for (int i = 0; i < bulletPools.Count; i++)
{
if(!bulletPools[i].activeInHierarchy)
{
return bulletPools[i];
}
}
if (newObject)
{
GameObject temBullet = Instantiate(prefab) as GameObject;
bulletPools.Add(temBullet);
return temBullet;
}
return null;
}
}
| 21.666667 | 69 | 0.534413 | 3.171875 |
a1cab002e71952814e80353f31f34caa211e763c
| 3,424 |
lua
|
Lua
|
include/loader.lua
|
Bilal2453/Script-Bot
|
035739ad805818ff6afab3a41bf28f41ccc042d2
|
[
"MIT"
] | 11 |
2020-03-02T17:32:49.000Z
|
2020-09-07T17:09:48.000Z
|
include/loader.lua
|
Bilal2453/Script-Bot
|
035739ad805818ff6afab3a41bf28f41ccc042d2
|
[
"MIT"
] | null | null | null |
include/loader.lua
|
Bilal2453/Script-Bot
|
035739ad805818ff6afab3a41bf28f41ccc042d2
|
[
"MIT"
] | null | null | null |
local fs = require 'fs'
local pathJoin = require 'pathjoin'.pathJoin
local new_fs_event = require 'uv'.new_fs_event
local stat, exists, scandir, readfile = fs.statSync, fs.existsSync, fs.scandirSync, fs.readFileSync
local module = {}
local function call(c, ...)
if type(c) == "function" then
return pcall(c, ...)
end
return
end
local function read(p, ...)
for _, v in ipairs{...} do
p = pathJoin(p, v)
end
local fileData, errmsg = readfile(p)
if not fileData then
return false, errmsg
end
return fileData
end
local function watch(path, callback)
local stats = {}
local oldStat = stat(path)
local isFile = oldStat.type == 'file'
local function rPath(p, n) return isFile and p or pathJoin(p, n) end
if isFile then
stats[path] = oldStat
else
local joined
for name in scandir(path) do
joined = pathJoin(path, name)
stats[joined] = stat(joined)
end
end
local fsEvent = new_fs_event()
fsEvent:start(path, {}, function(err, name, event)
if err then logger:log(1, err) return end
if not event.change then
local newPath = rPath(path, name)
-- NOTE: event.rename will be emitted even on delete-
-- but on the real rename event two event.rename will be emitted
-- omg please luvit fix that, this code should handle both
if not exists(newPath) then -- File Deleted?
stats[newPath] = nil -- Remove old stats
else -- File Created?
stats[newPath] = stat(newPath) -- Add the new stats
end
return
end
local filePath = rPath(path, name)
local old = stats[filePath]
local new = stat(filePath)
stats[filePath] = new
if new.size ~= 0 and (old.mtime.sec ~= new.mtime.sec or old.mtime.nsec ~= new.mtime.nsec) then
return callback(name)
end
end)
return fsEvent
end
-- TODO: Better and Cleaner loader... this one is just ugly and buggy.
local function loadDirec(direc, filesPattern, spaceName, baseMesg, beforeExec, afterExec)
spaceName = spaceName and spaceName.. ' : ' or ''
local function loadFile(name)
local filePath = pathJoin(direc, name)
local oName = name
name = name:gsub(filesPattern, '')
if not exists(filePath) then
logger:log(1, 'Attempt to find "%s" %s', name, baseMesg)
return
end
call(beforeExec, name)
local succ, result = read(filePath)
if not succ then
logger:log(1, 'Attempt to read "%s" : %s', filePath, result)
return
end
local runtimeSuccess, loader, errMesg = call(load, succ, oName, 't', env)
succ, result = call(loader)
runtimeSuccess = runtimeSuccess and loader
if not (runtimeSuccess and succ) then
logger:log(1, 'Attempt to load "%s" %s :\n\t\t\t\t %s', name, baseMesg,
tostring(runtimeSuccess and result or loader or errMesg)
)
return
end
call(afterExec, name, result)
logger:log(3, '%sSuccesfuly loaded "%s" %s', spaceName, name, baseMesg)
end
local function loadAll()
for filePath in scandir(direc) do
if filePath:find(filesPattern) then
loadFile(filePath)
end
end
end
loadAll()
-- Watch for changes and reload
local e = watch(direc, function(name)
if not name:find(filesPattern) then return end
if not exists(pathJoin(direc, name)) then return end
loadFile(name)
end)
return loadAll, e
end
module.loadDirec = loadDirec
module.watch = watch
return module
| 23.613793 | 100 | 0.663551 | 3.171875 |
1abcb32d42dc6682a4c7ed951e5530184c7220b2
| 4,024 |
py
|
Python
|
Perfect_game_player/pgameplayer/minimax_tree.py
|
lquispel/Chain-Reaction-Simulator
|
d826b3453d1a634186ab345e342e6fa7b44a2154
|
[
"MIT"
] | null | null | null |
Perfect_game_player/pgameplayer/minimax_tree.py
|
lquispel/Chain-Reaction-Simulator
|
d826b3453d1a634186ab345e342e6fa7b44a2154
|
[
"MIT"
] | null | null | null |
Perfect_game_player/pgameplayer/minimax_tree.py
|
lquispel/Chain-Reaction-Simulator
|
d826b3453d1a634186ab345e342e6fa7b44a2154
|
[
"MIT"
] | null | null | null |
import logging
logger = logging.getLogger("minimax")
# Straightforward minimax tree algorithm
PINF = 100
NINF = -100
class Node:
''' Node of minimax tree associated with a board state and player'''
def __init__(self):
self.state = None
self.value = None
self.player = True
self.best_move = None
def if_leaf(self):
return True
def generate_moves(self):
return []
def evaluate(self):
return self.value
def __str__(self):
str = ''
for r in range(len(self.state)):
str += '|'.join(self.state[r]) + '\n'
return str
def minimax(node, player):
'''
Main minimax function that obtains the best move to take
:return:
'''
if node.if_leaf():
return ([], node.evaluate())
if player:
maxv = NINF
possible_moves = node.generate_moves(player)
for child in possible_moves:
(_, child.value) = minimax(child, not player)
if child.value > maxv:
maxv = child.value
node.best_move = child.state
node.value = maxv
logger.debug("{} == {}".format(node.state, node.value))
return (node.best_move, maxv)
else:
minv = PINF
possible_moves = node.generate_moves(player)
for child in possible_moves:
(_, child.value) = minimax(child, not player)
if child.value < minv:
minv = child.value
node.best_move = child.state
node.value = minv
logger.debug("{} == {}".format(node.state, node.value))
return (node.best_move, minv)
# TODO: refactor to return next move too
def depth_limited_minimax(node, depth, player):
'''
Minimax algorithm that returns after a particular depth is reached
:param node:
:param depth:
:param player:
:return:
'''
if node.if_leaf() or depth == 0:
return node.evaluate()
if player:
maxv = NINF
possible_moves = node.generate_moves(player)
for child in possible_moves:
child.value = depth_limited_minimax(child, depth - 1, not player)
if child.value > maxv:
maxv = child.value
node.best_move = child.state
node.value = maxv
logger.debug("{} == {}".format(node, node.value))
return maxv
else:
minv = PINF
possible_moves = node.generate_moves(player)
for child in possible_moves:
child.value = depth_limited_minimax(child, depth - 1, not player)
if child.value < minv:
minv = child.value
node.best_move = child.state
node.value = minv
logger.debug("{} == {}".format(node, node.value))
return minv
# alpha beta pruning takes a while
def alpha_beta_pruning_minimax(node, player, alpha, beta):
'''
Minimax variant that maintains max and min value for every node and prunes branches that are unnecessary
'''
if node.if_leaf():
return node.evaluate()
if player:
possible_moves = node.generate_moves(player)
for child in possible_moves:
child.value = alpha_beta_pruning_minimax(child, not player, alpha, beta)
if child.value > alpha:
alpha = child.value
node.best_move = child.state
if alpha > beta:
break
node.value = alpha
logger.debug(" {} == {}".format(node, node.value))
return alpha
else:
possible_moves = node.generate_moves(player)
for child in possible_moves:
child.value = alpha_beta_pruning_minimax(child, not player, alpha, beta)
if child.value < beta:
beta = child.value
node.best_move = child.state
if alpha > beta:
break
node.value = beta
logger.debug(" {} == {}".format(node, node.value))
return beta
| 27.37415 | 108 | 0.572316 | 3.34375 |
6de5fede6f537d23cfb405ea62277b111806c5b8
| 9,695 |
ts
|
TypeScript
|
client/src/statemachine/explore/garden.ts
|
stevenwaterman/NoTimeToStalk
|
3884f3ad4c112b6c587e68d734f93bd68823b9bd
|
[
"MIT"
] | null | null | null |
client/src/statemachine/explore/garden.ts
|
stevenwaterman/NoTimeToStalk
|
3884f3ad4c112b6c587e68d734f93bd68823b9bd
|
[
"MIT"
] | null | null | null |
client/src/statemachine/explore/garden.ts
|
stevenwaterman/NoTimeToStalk
|
3884f3ad4c112b6c587e68d734f93bd68823b9bd
|
[
"MIT"
] | null | null | null |
import { makeBackgrounds, MonologueNode, OptionNode } from "../controller";
import { atriumStart } from "./atrium";
import { balconyCharacters, bathroomWindowCharacters, confirmWeapon, hasBuff, isHoldingWeapon, isHostHere, loungeWindowCharacters, studyWindowCharacters } from "./explore";
import { getGlobalOptions } from "./global";
import { porchStart } from "./porch";
import { nodeStore, updateState } from "../state";
export type GardenFlags = {
lookAround: boolean;
graves: boolean;
fountain: boolean;
vegetables: boolean;
};
const backgrounds = makeBackgrounds("garden", [
"wide",
"wide1",
"wide2",
"wide3",
"grave1",
"grave2",
"fountain",
"chair",
"windows1",
"windows2",
"windows3",
"tree",
"treeTop",
"spade",
"vegetables"
])
export const gardenStartAtrium: MonologueNode = {
type: "MONOLOGUE",
onEnter: state => {
const wet = hasBuff(state, "wet") || !isHoldingWeapon(state, "umbrella");
updateState({ location: "garden", action: "leaving the house through the back door.", buffs: { wet }})(state);
},
backgroundUrl: backgrounds.wide,
text: state => [
"You walk through the back door into the luscious rear garden of the manor house.",
isHoldingWeapon(state, "umbrella") ? "It's raining, but your umbrella protects you." : "It's raining, and you're now wet."
],
next: () => nodeStore.set(gardenOptions)
}
export const gardenStartPorch: MonologueNode = {
type: "MONOLOGUE",
onEnter: state => {
const wet = hasBuff(state, "wet") || !isHoldingWeapon(state, "umbrella");
updateState({ location: "garden", action: "entering the garden from the porch.", buffs: { wet }})(state);
},
backgroundUrl: backgrounds.wide,
text: state => [
"You leave the porch to walk into the luscious rear garden of the manor house.",
isHoldingWeapon(state, "umbrella") ? "It's raining, but your umbrella protects you." : "It's raining, and you're now wet."
],
next: () => nodeStore.set(gardenOptions)
}
export const gardenStartIvy: MonologueNode = {
type: "MONOLOGUE",
onEnter: state => {
const wet = hasBuff(state, "wet") || !isHoldingWeapon(state, "umbrella");
updateState({ location: "garden", action: "climbing down from the balcony into the garden.", suspicious: true, buffs: { wet }})(state);
},
backgroundUrl: backgrounds.wide,
text: () => ["You find yourself in the luscious rear garden of the manor house."],
next: () => nodeStore.set(gardenOptions)
}
export const gardenStartStudy: MonologueNode = {
type: "MONOLOGUE",
onEnter: state => {
const wet = hasBuff(state, "wet") || !isHoldingWeapon(state, "umbrella");
updateState({ location: "garden", action: "climbing through the broken study window into the garden.", suspicious: true, buffs: { wet }})(state);
},
backgroundUrl: backgrounds.wide,
text: state => [
"You land in the luscious rear garden of the manor house.",
isHoldingWeapon(state, "umbrella") ? "It's raining, but your umbrella protects you." : "It's raining, and you're now wet."
],
next: () => nodeStore.set(gardenOptions)
}
const gardenOptions: OptionNode = {
type: "OPTION",
prompt: "What do you want to do?",
backgroundUrl: backgrounds.wide1,
options: [
{
visible: state => !state.explore.roomFlags.garden.lookAround,
text: "Look around",
next: () => nodeStore.set(lookAround)
},
{
visible: state => state.explore.roomFlags.garden.lookAround && !state.explore.roomFlags.garden.fountain,
text: "Inspect the fountain",
next: () => nodeStore.set(inspectFountain)
},
{
visible: state => state.explore.roomFlags.garden.lookAround,
text: "Look through the windows",
next: () => nodeStore.set(lookThroughWindows1)
},
{
visible: state => state.explore.roomFlags.garden.lookAround && !state.explore.roomFlags.garden.vegetables,
text: "Inspect the allotments",
next: () => nodeStore.set(inspectAllotments)
},
{
visible: state => state.explore.roomFlags.garden.lookAround && !state.explore.roomFlags.garden.graves,
text: "Inspect the graves",
next: () => nodeStore.set(inspectGraves1)
},
...getGlobalOptions(() => gardenOptions, backgrounds.wide),
{
visible: state => state.explore.roomFlags.garden.lookAround,
text: "Climb a tree",
next: () => nodeStore.set(climbTree)
},
{
visible: state => state.explore.roomFlags.garden.lookAround && isHoldingWeapon(state, "spade"),
text: "Pick up a spade",
next: state => confirmWeapon(state, gardenOptions, takeSpade),
disabledReasons: [{ disabled: isHostHere, reason: "You can't do that now, The Admiral will see you..." }]
},
{
text: "Go into the Atrium",
next: () => nodeStore.set(atriumStart)
},
{
text: "Go into the Porch",
next: () => nodeStore.set(porchStart)
},
]
}
const lookAround: MonologueNode = {
type: "MONOLOGUE",
onEnter: updateState({ action: "having a look around.", roomFlags: { garden: { lookAround: true }}}),
backgroundUrl: backgrounds.wide1,
text: () => ["You have a look around the garden, taking in all of the sights and the smells of your damp surroundings."],
next: () => nodeStore.set(lookAround2)
}
const lookAround2: MonologueNode = {
type: "MONOLOGUE",
backgroundUrl: backgrounds.wide2,
text: () => ["The candle-lit garden tries to give off comfy vibes, but you can't ignore the concerning number of graves under the tree."],
next: () => nodeStore.set(lookAround3)
}
const lookAround3: MonologueNode = {
type: "MONOLOGUE",
backgroundUrl: backgrounds.wide3,
text: () => ["You're sure it looks much more peaceful when it's not dark and rainy and on the night of a murder."],
next: () => nodeStore.set(gardenOptions)
}
const inspectGraves1: MonologueNode = {
type: "MONOLOGUE",
onEnter: updateState({ action: "reading the graves.", suspicious: true, roomFlags: { garden: { graves: true }}}),
backgroundUrl: backgrounds.grave1,
text: () => ["You inspect the graves under the tree. They don't look very old, but already the names have started to fade away."],
next: () => nodeStore.set(inspectGraves2)
}
const inspectGraves2: MonologueNode = {
type: "MONOLOGUE",
backgroundUrl: backgrounds.grave2,
text: () => ["There are fresh flowers left by the graves - the Admiral must care deeply for these people. You ponder their significance."],
next: () => nodeStore.set(gardenOptions)
}
const climbTree: MonologueNode = {
type: "MONOLOGUE",
onEnter: updateState({ action: "climbing the tree.", suspicious: true, buffs: { muddy: true } }),
backgroundUrl: backgrounds.tree,
sounds: [{ filePath: "effects/explore/garden/climbTree" }],
text: () => ["You decide to climb up the tree. It's a little slippery and you get muddy in the process, but you manage to lean on one of the sturdier branches."],
next: () => nodeStore.set(climbTreeTop)
}
const climbTreeTop: MonologueNode = {
type: "MONOLOGUE",
onEnter: updateState({ action: "sitting at the top of the tree.", suspicious: true }),
backgroundUrl: backgrounds.treeTop,
text: state => ["From the top of the tree, you can see the balcony.", ...balconyCharacters(state)],
next: () => nodeStore.set(gardenOptions)
}
const takeSpade: MonologueNode = {
type: "MONOLOGUE",
onEnter: updateState({ action: "picking up a spade.", weapon: "spade", suspicious: true, buffs: { wet: true }}),
sounds: [{filePath: "effects/explore/garden/spade"}],
backgroundUrl: backgrounds.spade,
text: () => [
"After assessing all your options of optimal garden tool to acquire, you take a spade.",
"The spade doesn't help protect you from the rain."
],
next: () => nodeStore.set(gardenOptions)
}
const inspectFountain: MonologueNode = {
type: "MONOLOGUE",
onEnter: updateState({ action: "looking in the fountain.", roomFlags: { garden: { fountain: true }}}),
backgroundUrl: backgrounds.fountain,
text: () => ["You take a look in the fountain. The water's a bit grimy from the leaves and dirt ending up in there, and you can barely hear it over the rain, but it looks adequately grand for this house."],
next: () => nodeStore.set(gardenOptions)
}
const inspectAllotments: MonologueNode = {
type: "MONOLOGUE",
onEnter: updateState({ action: "looking at the vegetables in the allotment.", roomFlags: { garden: { vegetables: true }}}),
backgroundUrl: backgrounds.vegetables,
text: () => [
"You look into the allotments and see a large variety of vegetables growing.",
"It's the harvest festival soon - something you read about during your degree. Every year, in the autumn, people would collect their finest fruits and vegetables from that year's harvest. Then, they'd perform a ritual where they left a sacrifice to the green giant, to ensure the next year's harvest was bountiful.",
"You'd love to ask the Admiral more about the green giant."
],
next: () => nodeStore.set(gardenOptions)
}
const lookThroughWindows1: MonologueNode = {
type: "MONOLOGUE",
onEnter: updateState({ action: "looking through the windows into the house." }),
backgroundUrl: backgrounds.windows2,
text: state => loungeWindowCharacters(state),
next: () => nodeStore.set(lookThroughWindows2)
}
const lookThroughWindows2: MonologueNode = {
type: "MONOLOGUE",
backgroundUrl: backgrounds.windows1,
text: state => studyWindowCharacters(state),
next: () => nodeStore.set(lookThroughWindows3)
}
const lookThroughWindows3: MonologueNode = {
type: "MONOLOGUE",
backgroundUrl: backgrounds.windows3,
text: state => bathroomWindowCharacters(state),
next: () => nodeStore.set(gardenOptions)
}
| 39.410569 | 321 | 0.685611 | 3 |
d633a9c553538df215bfc216b45e03292906d9ff
| 1,366 |
cs
|
C#
|
C#/Man/Division/Program.cs
|
Futupas/Man_1819
|
8316c02cb2b55612c4577742005c824e2017cbe6
|
[
"MIT"
] | null | null | null |
C#/Man/Division/Program.cs
|
Futupas/Man_1819
|
8316c02cb2b55612c4577742005c824e2017cbe6
|
[
"MIT"
] | null | null | null |
C#/Man/Division/Program.cs
|
Futupas/Man_1819
|
8316c02cb2b55612c4577742005c824e2017cbe6
|
[
"MIT"
] | null | null | null |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
namespace Division
{
class Program
{
static void Main(string[] args)
{
try
{
int a, b, d, ch, nac; //a/b=c
//dilene, chastka, numbers after comma
Console.WriteLine("a / b = c");
Console.Write("a = "); a = Int32.Parse(Console.ReadLine());
Console.Write("b = "); b = Int32.Parse(Console.ReadLine());
Console.Write("nac = "); nac = Int32.Parse(Console.ReadLine());
Console.Clear();
d = a / b;
ch = a % b;
Console.WriteLine(d);
Console.WriteLine(".");
a = ch * 10;
for (int i = 0; i < nac; i++)
{
d = a / b;
ch = a % b;
Console.WriteLine(d);
a = ch * 10;
Thread.Sleep(10);
}
Console.ReadLine();
}
catch (Exception ex)
{
Console.ForegroundColor = ConsoleColor.Red;
Console.WriteLine(ex.Message);
Console.ReadLine();
}
}
}
}
| 29.06383 | 79 | 0.402635 | 3.109375 |
384b3322a6aac4162a447774a4d4a7ed765314f1
| 1,078 |
cs
|
C#
|
CodeGenerator/Config/EntitySchemaCollection.cs
|
conorjgallagher/Dynamics.ExtendedSvcUtil
|
d058ae8c97ce46c7c1f642b9ae24b822b50be1e6
|
[
"MIT"
] | 4 |
2016-06-24T20:12:57.000Z
|
2020-02-26T15:08:36.000Z
|
CodeGenerator/Config/EntitySchemaCollection.cs
|
conorjgallagher/Dynamics.ExtendedSvcUtil
|
d058ae8c97ce46c7c1f642b9ae24b822b50be1e6
|
[
"MIT"
] | 3 |
2017-01-30T14:58:53.000Z
|
2019-04-02T13:44:16.000Z
|
CodeGenerator/Config/EntitySchemaCollection.cs
|
conorjgallagher/Dynamics.ExtendedSvcUtil
|
d058ae8c97ce46c7c1f642b9ae24b822b50be1e6
|
[
"MIT"
] | 1 |
2021-08-23T08:22:16.000Z
|
2021-08-23T08:22:16.000Z
|
using System.Collections.Generic;
using System.Configuration;
using System.Linq;
namespace CodeGenerator.Config
{
[ConfigurationCollection(typeof(EntitySchema), AddItemName = "entity")]
public class EntitySchemaCollection : ConfigurationElementCollection, IEnumerable<EntitySchema>
{
protected override ConfigurationElement CreateNewElement()
{
return new EntitySchema();
}
protected override object GetElementKey(ConfigurationElement element)
{
var configElement = element as EntitySchema;
if (configElement != null)
return configElement.Name;
return null;
}
public EntitySchema this[int index]
{
get
{
return BaseGet(index) as EntitySchema;
}
}
IEnumerator<EntitySchema> IEnumerable<EntitySchema>.GetEnumerator()
{
return (from i in Enumerable.Range(0, Count)
select this[i])
.GetEnumerator();
}
}
}
| 28.368421 | 99 | 0.601113 | 3.03125 |
36acdd54b92a33a235fc3e4435a94c5177ceea53
| 1,368 |
dart
|
Dart
|
system_metrics_widget/lib/src/widgets/metrics/usage_indicator/usage_indicator_widget.dart
|
DisDis/dslideshow
|
58e9b2afbcd43c7c7f99a2382dd109bcef5c4185
|
[
"MIT"
] | 1 |
2021-04-29T14:49:00.000Z
|
2021-04-29T14:49:00.000Z
|
system_metrics_widget/lib/src/widgets/metrics/usage_indicator/usage_indicator_widget.dart
|
DisDis/dslideshow
|
58e9b2afbcd43c7c7f99a2382dd109bcef5c4185
|
[
"MIT"
] | null | null | null |
system_metrics_widget/lib/src/widgets/metrics/usage_indicator/usage_indicator_widget.dart
|
DisDis/dslideshow
|
58e9b2afbcd43c7c7f99a2382dd109bcef5c4185
|
[
"MIT"
] | 1 |
2021-07-15T18:34:22.000Z
|
2021-07-15T18:34:22.000Z
|
import 'dart:math';
import 'package:flutter/cupertino.dart';
import 'package:flutter/material.dart';
import 'package:system_metrics_widget/src/environment/settings.dart';
import 'package:system_metrics_widget/src/widgets/metrics/usage_indicator/usage_bar.dart';
abstract class UsageIndicatorWidget extends StatelessWidget {
final String title;
final String total;
final String free;
final String used;
final int? usagePercent;
UsageIndicatorWidget({
required this.title,
required this.total,
required this.free,
required this.used,
required this.usagePercent,
});
@override
Widget build(BuildContext context) {
return Column(
crossAxisAlignment: CrossAxisAlignment.start,
children: [
Text(
'$title: $total, used: $used, free: $free, usage $usagePercent %',
style: Settings.metricsDetailsTextStyle,
),
Padding(
padding: EdgeInsets.only(top: 4),
child: UsageBar(
usagePercent: usagePercent,
),
),
],
);
}
static String formatBytes(int bytes, int decimals) {
if (bytes <= 0) return '0 B';
const suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'];
var i = (log(bytes) / log(1024)).floor();
return ((bytes / pow(1024, i)).toStringAsFixed(decimals)) + ' ' + suffixes[i];
}
}
| 27.918367 | 90 | 0.642544 | 3.09375 |
2cbb88239d05ecb172bf155a8c4485a6b8e67a30
| 1,687 |
py
|
Python
|
JsonCodeTools/objects_common/enumType.py
|
kamlam/EAGLE-Open-Model-Profile-and-Tools-1
|
42690535de136251d8a464ad254ac0ea344d383a
|
[
"Apache-2.0"
] | null | null | null |
JsonCodeTools/objects_common/enumType.py
|
kamlam/EAGLE-Open-Model-Profile-and-Tools-1
|
42690535de136251d8a464ad254ac0ea344d383a
|
[
"Apache-2.0"
] | null | null | null |
JsonCodeTools/objects_common/enumType.py
|
kamlam/EAGLE-Open-Model-Profile-and-Tools-1
|
42690535de136251d8a464ad254ac0ea344d383a
|
[
"Apache-2.0"
] | null | null | null |
class EnumType(object):
# Internal data storage uses integer running from 0 to range_end
# range_end is set to the number of possible values that the Enum can take on
# External representation of Enum starts at 1 and goes to range_end + 1
def __init__(self, initial_value):
self.set(initial_value)
def load_json(self, json_struct):
self.set(json_struct)
def json_serializer(self):
# Returns a string
# This could be changed to encode enums as integers when transmitting messages
if self.value < 0:
return None
else:
return type(self).possible_values[self.value]
def __str__(self):
return str(self.json_serializer())
def get(self):
# Returns an integer, using the external representation
return self.value + 1
def set(self, value):
# The value to set can be either a string or an integer
if type(value) is str:
# This will raise ValueError for wrong assignments
try:
self.value = type(self).possible_values.index(value)
except ValueError:
raise ValueError('', value, type(self).possible_values)
elif type(value) is int:
if value >= 0 and value <= type(self).range_end:
# External representation of Enum starts at 1, internal at 0. External value 0 by default
# to indicate empty object.
value = value - 1
self.value = value
else:
raise ValueError('', value, type(self).range_end)
else:
raise TypeError('', value, 'string or integer')
| 37.488889 | 105 | 0.608773 | 3.46875 |
dda2b90a2bd4a911ebb325dfb5248e19d573fae1
| 1,602 |
py
|
Python
|
src/network_state_graph.py
|
RoseBay-Consulting/BlockSim
|
95dcf2fde05877bdcf59610fd3c3a3314b892f6c
|
[
"MIT"
] | 13 |
2019-03-14T09:01:51.000Z
|
2020-11-21T18:46:59.000Z
|
src/network_state_graph.py
|
RoseBay-Consulting/BlockSim
|
95dcf2fde05877bdcf59610fd3c3a3314b892f6c
|
[
"MIT"
] | null | null | null |
src/network_state_graph.py
|
RoseBay-Consulting/BlockSim
|
95dcf2fde05877bdcf59610fd3c3a3314b892f6c
|
[
"MIT"
] | 12 |
2019-03-14T07:54:29.000Z
|
2021-04-13T16:57:23.000Z
|
#import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import random
# use graphviz
def show_network(data,labels):
pass
def csv_loader():
data = pd.read_csv("config/network_model.csv")
data.set_index('node',inplace=True)
nodes=data.columns.tolist()
nodeID=[int(i) for i in nodes]
network_df = pd.DataFrame(data.values,columns=nodeID,index=nodeID)
print(network_df)
graph = nx.from_numpy_matrix(network_df.values)
#nx.draw(graph)
#plt.show()
return network_df,nodeID
def network_creator(nodeID,max_latency):
'''
Arguments:
1. nodeID : List of nodes ID
2. max_latency: Maximum latency to be asserted in communication between nodes
For future improvements, make sure to handle an unconnected node.
'''
dimension= len(nodeID)
# Generate a random adjency matrix of size dimension * dimension for lookup table
np.random.seed(7)
x=np.random.randint(2, size=(dimension, dimension))
# Fill diagonal value with 0 for representing 0 latency for self communication.
np.fill_diagonal(x,0)
# Generate a graph
graph = nx.from_numpy_matrix(x)
# Add latency randomly
for (u, v) in graph.edges():
np.random.seed(7)
graph[u][v]['weight'] = random.randint(1,max_latency)
network_df= pd.DataFrame(nx.to_numpy_array(graph),columns=nodeID,index=nodeID)
print("Printing network")
network_df.to_csv('20_nodes.csv',index=False)
# print(network_df)
# nx.draw(graph)
#nx.draw(nx.from_numpy_array(network_df.values))
return network_df
| 32.04 | 85 | 0.704744 | 3.5625 |
a5755593cdc656f8d04f194f8972fb2ecaa192d2
| 1,686 |
dart
|
Dart
|
lib/src/repositories/employer_repository/employer_provider.dart
|
IAmRealPoca/cvideo-employer-mobile
|
aab821dc66d2eca9c80532c4acbafbd542031fb8
|
[
"MIT"
] | null | null | null |
lib/src/repositories/employer_repository/employer_provider.dart
|
IAmRealPoca/cvideo-employer-mobile
|
aab821dc66d2eca9c80532c4acbafbd542031fb8
|
[
"MIT"
] | null | null | null |
lib/src/repositories/employer_repository/employer_provider.dart
|
IAmRealPoca/cvideo-employer-mobile
|
aab821dc66d2eca9c80532c4acbafbd542031fb8
|
[
"MIT"
] | null | null | null |
import 'dart:convert';
import 'package:cvideo_employer_mobile/src/app_components/app_components.dart';
import 'package:cvideo_employer_mobile/src/models/models.dart';
class EmployerProvider {
static const successCode = 200;
static final EmployerProvider _instance = EmployerProvider._internal();
EmployerProvider._internal();
factory EmployerProvider() {
return _instance;
}
Future<StatisticsModel> fetchStatistics() async {
String apiToken = await AppStorage.instance.readSecureApiToken();
final response = await AppHttpClient.get(
"/employers/current-employer/statistics",
headers: {"Authorization": "bearer $apiToken"});
if (response.statusCode != successCode) {
throw Exception("Failed to loading!");
}
final dynamic json = jsonDecode(Utf8Decoder().convert(response.bodyBytes));
return StatisticsModel.fromJson(json);
// final response = await rootBundle.loadString("assets/json/statistics.json");
// dynamic json = jsonDecode(response);
// return StatisticsModel.fromJson(json);
}
Future<List<RecruitmentModel>> fetchRecruitments() async {
String apiToken = await AppStorage.instance.readSecureApiToken();
final response = await AppHttpClient.get(
"/employers/current-employer/recruitment-posts",
headers: {"Authorization": "bearer $apiToken"});
if (response.statusCode != successCode) {
throw Exception("Failed to loading!");
}
final List<dynamic> json =
jsonDecode(Utf8Decoder().convert(response.bodyBytes));
return json.map((e) => RecruitmentModel.fromJson(e)).toList();
}
}
| 31.222222 | 84 | 0.692764 | 3.203125 |
06d3d3fcbfb72b630cfccde711744699d4c5e431
| 2,274 |
py
|
Python
|
maestro/container.py
|
kstaken/maestro
|
5f7537acece4612ef0c9a5b8f59f060aa980468e
|
[
"MIT"
] | 113 |
2015-01-02T20:42:06.000Z
|
2022-02-06T09:48:13.000Z
|
maestro/container.py
|
kstaken/maestro
|
5f7537acece4612ef0c9a5b8f59f060aa980468e
|
[
"MIT"
] | 3 |
2015-06-12T10:25:51.000Z
|
2020-11-25T04:46:37.000Z
|
maestro/container.py
|
toscanini/maestro
|
5f7537acece4612ef0c9a5b8f59f060aa980468e
|
[
"MIT"
] | 15 |
2015-01-30T06:20:23.000Z
|
2022-02-06T22:28:50.000Z
|
import os, sys
from exceptions import ContainerError
import utils, StringIO, logging
import py_backend
class Container:
def __init__(self, name, state, config, mounts=None):
self.log = logging.getLogger('maestro')
self.state = state
self.config = config
self.name = name
self.mounts = mounts
if 'hostname' not in self.config:
self.config['hostname'] = name
#if 'command' not in self.config:
# self.log.error("Error: No command specified for container " + name + "\n")
# raise ContainerError('No command specified in configuration')
self.backend = py_backend.PyBackend()
def create(self):
self._start_container(False)
def run(self):
self._start_container()
def rerun(self):
# Commit the current container and then use that image_id to restart.
self.state['image_id'] = self.backend.commit_container(self.state['container_id'])['Id']
self._start_container()
def start(self):
utils.status("Starting container %s - %s" % (self.name, self.state['container_id']))
self.backend.start_container(self.state['container_id'], self.mounts)
def stop(self, timeout=10):
utils.status("Stopping container %s - %s" % (self.name, self.state['container_id']))
self.backend.stop_container(self.state['container_id'], timeout=timeout)
def destroy(self, timeout=None):
self.stop(timeout)
utils.status("Destroying container %s - %s" % (self.name, self.state['container_id']))
self.backend.remove_container(self.state['container_id'])
def get_ip_address(self):
return self.backend.get_ip_address(self.state['container_id'])
def inspect(self):
return self.backend.inspect_container(self.state['container_id'])
def attach(self):
# should probably catch ctrl-c here so that the process doesn't abort
for line in self.backend.attach_container(self.state['container_id']):
sys.stdout.write(line)
def _start_container(self, start=True):
# Start the container
self.state['container_id'] = self.backend.create_container(self.state['image_id'], self.config)
if (start):
self.start()
self.log.info('Container started: %s %s', self.name, self.state['container_id'])
| 33.940299 | 99 | 0.684257 | 3.265625 |
ff6a4aca4dba8e9a2d73153671c922968adad77c
| 4,423 |
py
|
Python
|
tracklets/utils/utils.py
|
yoyomimi/TNT_pytorch
|
165c5da0bc66baf84bb1ddc5fe0cf6a101555c59
|
[
"Apache-2.0"
] | 20 |
2020-03-08T14:47:04.000Z
|
2021-11-14T12:55:28.000Z
|
tracklets/utils/utils.py
|
Mauriyin/TNT_pytorch
|
fed7e182a45e5cf74d827f090d72251eedbd7cc1
|
[
"Apache-2.0"
] | 13 |
2020-03-08T14:39:01.000Z
|
2022-03-12T00:18:08.000Z
|
tracklets/utils/utils.py
|
Mauriyin/TNT_pytorch
|
fed7e182a45e5cf74d827f090d72251eedbd7cc1
|
[
"Apache-2.0"
] | 6 |
2020-03-08T14:47:11.000Z
|
2021-05-21T10:33:05.000Z
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# Created by Mingfei Chen ([email protected])
# Created On: 2020-2-25
# ------------------------------------------------------------------------------
import numpy as np
import pandas as pd
import torch
def get_embeddings(model, frame_feat, max_emb_bs=16):
"""use appearance inference model to get embeddings from frames in fram_list.
Args:
model: pretrained appearance model, eval mode for inference.
frame_feat: (frame_num, 3, size, size).
max_emb_bs: the maximun of the frames fed to the appearance model one time as a batch.
Return:
img_emb_output: (frame_num, emb_dim).
"""
img_bs = frame_feat.shape[0]
p_frame = 0
img_emb = []
while img_bs > max_emb_bs:
img_input = frame_feat[p_frame:p_frame+max_emb_bs]
img_emb.append(model(img_input))
p_frame += max_emb_bs
img_bs -= max_emb_bs
img_input = frame_feat[p_frame:p_frame+img_bs]
if img_bs == 1:
new_input = torch.stack([img_input[0], img_input[0]])
img_emb.append(model(new_input)[-1].view(1, -1))
else:
img_emb.append(model(img_input))
img_emb_output = torch.cat(img_emb)
return img_emb_output
def get_tracklet_pair_input_features(model, img_1, img_2, loc_mat, tracklet_mask_1, tracklet_mask_2, real_window_len, tracklet_pair_features):
"""Reference to the paper "https://arxiv.org/pdf/1811.07258.pdf" Section 4.2 Multi-Scale TrackletNet.
Args:
model: pretrained appearance model, eval mode for inference.
img_1: one batch of tracklet_1_frames, <list>, len eqs batchsize; for each item, <torch.Tensor>, (frame_num, 3, size, size)
img_2: one batch of tracklet_2_frames, <list>, len eqs batchsize; for each item, <torch.Tensor>, (frame_num, 3, size, size)
loc_mat: (bs, window_len, 4), interpolated in the dataset file, indicates the location for each tracklet.
tracklet_mask_1: (bs, window_len, 1), detection status, 1 when there is an object detected.
tracklet_mask_2: (bs, window_len, 1), detection status, 1 when there is an object detected.
tracklet_pair_features: (bs, window_len, emb_dim)
Return:
tracklet_pair_input: (bs, 4+emb_dim, window_len, 3)
"""
assert len(img_1) == len(img_2) == len(tracklet_pair_features)
bs, window_len, emb_dim = tracklet_pair_features.shape
# extract embedding using pretrained appearance model
for i in range(bs):
real_win_len_now = real_window_len[i]
emb_1 = get_embeddings(model, img_1[i])
frame_len_1 = len(emb_1)
emb_2 = get_embeddings(model, img_2[i])
frame_len_2 = len(emb_2)
tracklet_pair_features[i][:frame_len_1] = emb_1.detach()
tracklet_pair_features[i][real_win_len_now-frame_len_2:real_win_len_now] = emb_2.detach()
tracklet_pair_features_np = tracklet_pair_features.cpu().numpy()
# interpolate the embeddings
for i in range(bs):
real_win_len_now = real_window_len[i]
feat_np = tracklet_pair_features_np[i]
feat_np[0][np.where(feat_np[0]==0)] = 1e-5
feat_np[-1][np.where(feat_np[-1]==0)] = 1e-5
feat_pd = pd.DataFrame(data=feat_np).replace(0, np.nan, inplace=False)
feat_pd_np = np.array(feat_pd.interpolate()).astype(np.float32)
if real_win_len_now < window_len:
feat_pd_np[real_win_len_now:] = np.zeros((window_len-real_win_len_now, emb_dim)).astype(np.float32)
tracklet_pair_features[i] = torch.from_numpy(feat_pd_np)
tracklet_pair_features = tracklet_pair_features.cuda(non_blocking=True) # (bs, window_len, emb_dim)
# cat loc_mat (bs, window_len, emb_dim+4, 1)
tracklet_pair_features_with_loc = torch.cat([loc_mat, tracklet_pair_features], dim=-1).unsqueeze(-1)
# expand det mask (bs, window_len, emb_dim+4, 1)
tracklet_mask_1_input = tracklet_mask_1.expand(-1, -1, emb_dim+4).unsqueeze(-1)
tracklet_mask_2_input = tracklet_mask_2.expand(-1, -1, emb_dim+4).unsqueeze(-1)
# cat pair_input (bs, window_len, emb_dim+4, 3)
tracklet_pair_input = torch.cat([tracklet_pair_features_with_loc, tracklet_mask_1_input, tracklet_mask_2_input], dim=-1)
return tracklet_pair_input
| 42.528846 | 142 | 0.656116 | 3.109375 |
5d088baa36d8e56c9e85b240aeed3993c1dff95c
| 3,138 |
dart
|
Dart
|
lib/src/external/universal_http_client.dart
|
EdsonMello-code/uno
|
636669e8ffa6d389002b2ab41e76907fd84c412d
|
[
"MIT"
] | 38 |
2021-11-14T10:36:41.000Z
|
2022-03-12T22:14:22.000Z
|
lib/src/external/universal_http_client.dart
|
EdsonMello-code/uno
|
636669e8ffa6d389002b2ab41e76907fd84c412d
|
[
"MIT"
] | 3 |
2021-11-15T02:56:55.000Z
|
2022-01-19T19:44:06.000Z
|
lib/src/external/universal_http_client.dart
|
EdsonMello-code/uno
|
636669e8ffa6d389002b2ab41e76907fd84c412d
|
[
"MIT"
] | 3 |
2021-11-15T07:08:26.000Z
|
2021-11-16T20:12:30.000Z
|
import 'dart:convert';
import '../infra/infra.dart';
import 'package:universal_io/io.dart';
/// [HttpDatasource] implementation backed by `HttpClient` from
/// `universal_io`, so the same code path can run on VM, Flutter and web.
class UniversalHttpClient implements HttpDatasource {
  // Underlying client; its connectionTimeout is re-set on every fetch.
  final HttpClient client;
  const UniversalHttpClient(this.client);

  /// Executes [unoRequest] and adapts the raw response into the
  /// package's own [Response] type.
  ///
  /// Download progress is reported through
  /// `unoRequest.onDownloadProgress` (first with 0 bytes, then as chunks
  /// arrive). A [SocketException] is re-thrown as an [UnoError] that
  /// keeps the original exception and stack trace.
  @override
  Future<Response> fetch(Request unoRequest) async {
    client.connectionTimeout = unoRequest.timeout;
    try {
      final request = await client.openUrl(unoRequest.method, unoRequest.uri);
      // Copy every header from the uno request onto the dart:io request.
      for (var key in unoRequest.headers.keys) {
        request.headers.set(key, unoRequest.headers[key]!);
      }
      request.add(unoRequest.bodyBytes);
      final response = await request.close();
      // Initial progress callback before any byte is consumed.
      unoRequest.onDownloadProgress?.call(response.contentLength, 0);
      var totalbytes = 0;
      // Pass-through transformer whose only job is progress accounting.
      final mainStream = response.transform<List<int>>(
        StreamTransformer.fromHandlers(
          handleData: (value, sink) {
            totalbytes += value.length;
            unoRequest.onDownloadProgress
                ?.call(response.contentLength, totalbytes);
            sink.add(value);
          },
        ),
      );
      var data = await _convertResponseData(
          mainStream, unoRequest.responseType, unoRequest);
      // Flatten multi-valued headers into comma-joined strings.
      final headers = <String, String>{};
      response.headers.forEach((key, values) {
        headers[key] = values.join(',');
      });
      final unoResponse = Response(
        request: unoRequest,
        status: response.statusCode,
        data: data,
        headers: headers,
      );
      return unoResponse;
    } on SocketException catch (e, s) {
      throw UnoError<SocketException>(
        e.toString().replaceFirst('SocketException', ''),
        stackTrace: s,
        request: unoRequest,
        data: e,
      );
    }
  }

  /// Drains [mainStream] according to [responseType]:
  /// json → decoded object, plain → String, arraybuffer → List<int>,
  /// stream → the stream itself (not consumed here).
  /// NOTE(review): returns null for an unknown [ResponseType]; and the
  /// declared type is `dynamic` although the body is async (effectively
  /// a Future) — confirm callers always await it, as `fetch` does.
  dynamic _convertResponseData(Stream<List<int>> mainStream,
      ResponseType responseType, Request request) async {
    if (responseType == ResponseType.json) {
      try {
        final buffer = StringBuffer();
        // Decode UTF-8 incrementally, then parse the whole body at once.
        await for (var item in mainStream.transform(utf8.decoder)) {
          buffer.write(item);
        }
        return jsonDecode(buffer.toString());
      } on FormatException catch (e, s) {
        throw UnoError<FormatException>(
          'Data body isn`t a json. Please, use other [ResponseType] in request.',
          data: e,
          request: request,
          stackTrace: s,
        );
      }
    } else if (responseType == ResponseType.plain) {
      try {
        final buffer = StringBuffer();
        await for (var item in mainStream.transform(utf8.decoder)) {
          buffer.write(item);
        }
        return buffer.toString();
      } on FormatException catch (e, s) {
        throw UnoError<FormatException>(
          'Data body isn`t a plain text (String). Please, use other [ResponseType] in request.',
          data: e,
          request: request,
          stackTrace: s,
        );
      }
    } else if (responseType == ResponseType.arraybuffer) {
      // Collect all chunks into one flat byte list.
      var bytes = <int>[];
      await for (var b in mainStream) {
        bytes.addAll(b);
      }
      return bytes;
    } else if (responseType == ResponseType.stream) {
      return mainStream;
    }
  }
}
| 29.327103 | 96 | 0.602932 | 3.109375 |
20b5a3837d98800d6af3db1760c1887348c065b8
| 9,262 |
py
|
Python
|
functions/socketio/video.py
|
VMAJSTER/openstreamingplatform
|
f002246db922dab9a3f019f46001f3901326feaf
|
[
"MIT"
] | null | null | null |
functions/socketio/video.py
|
VMAJSTER/openstreamingplatform
|
f002246db922dab9a3f019f46001f3901326feaf
|
[
"MIT"
] | null | null | null |
functions/socketio/video.py
|
VMAJSTER/openstreamingplatform
|
f002246db922dab9a3f019f46001f3901326feaf
|
[
"MIT"
] | null | null | null |
from flask import abort
from flask_security import current_user
from classes.shared import db, socketio
from classes import RecordedVideo
from classes import settings
from classes import notifications
from classes import subscriptions
from functions import system
from functions import webhookFunc
from functions import templateFilters
from functions import videoFunc
from functions import subsFunc
from app import r
@socketio.on('deleteVideo')
def deleteVideoSocketIO(message):
    """Socket.io handler: delete a recorded video.

    Expects ``message['videoID']``; replies ``'OK'`` on success,
    HTTP 500 on failure and HTTP 401 for anonymous users.
    """
    if not current_user.is_authenticated:
        db.session.commit()
        db.session.close()
        return abort(401)
    video_id = int(message['videoID'])
    deleted = videoFunc.deleteVideo(video_id)
    # The session is finalised on every path, success or not.
    db.session.commit()
    db.session.close()
    if deleted is True:
        return 'OK'
    return abort(500)
@socketio.on('editVideo')
def editVideoSocketIO(message):
    """Socket.io handler: update a video's metadata.

    Reads videoID, videoName, videoTopic, videoDescription and
    videoAllowComments from ``message``; replies ``'OK'`` on success,
    HTTP 500 on failure and HTTP 401 for anonymous users.
    """
    if not current_user.is_authenticated:
        db.session.commit()
        db.session.close()
        return abort(401)
    video_id = int(message['videoID'])
    name = system.strip_html(message['videoName'])
    topic = int(message['videoTopic'])
    description = message['videoDescription']
    # The client may deliver the flag either as the string "True" or a bool.
    allow_comments = message['videoAllowComments'] in ("True", True)
    changed = videoFunc.changeVideoMetadata(
        video_id, name, topic, description, allow_comments)
    db.session.commit()
    db.session.close()
    if changed is True:
        return 'OK'
    return abort(500)
@socketio.on('createClip')
def createclipSocketIO(message):
    """Socket.io handler: create a clip from a recorded video.

    Reads videoID, clipName, clipDescription, clipStart and clipStop
    from ``message``; replies ``'OK'`` on success, HTTP 500 on failure
    and HTTP 401 for anonymous users.
    """
    if not current_user.is_authenticated:
        db.session.commit()
        db.session.close()
        return abort(401)
    outcome = videoFunc.createClip(
        int(message['videoID']),
        float(message['clipStart']),
        float(message['clipStop']),
        system.strip_html(message['clipName']),
        message['clipDescription'],
    )
    db.session.commit()
    db.session.close()
    # createClip returns a tuple; only the leading success flag matters here.
    if outcome[0] is True:
        return 'OK'
    return abort(500)
@socketio.on('moveVideo')
def moveVideoSocketIO(message):
    """Socket.io handler: move a video to another channel.

    Reads videoID and destinationChannel from ``message``; replies
    ``'OK'`` on success, HTTP 500 on failure, HTTP 401 for anonymous users.
    """
    if not current_user.is_authenticated:
        db.session.commit()
        db.session.close()
        return abort(401)
    moved = videoFunc.moveVideo(
        int(message['videoID']), int(message['destinationChannel']))
    db.session.commit()
    db.session.close()
    if moved is True:
        return 'OK'
    return abort(500)
@socketio.on('togglePublished')
def togglePublishedSocketIO(message):
    """Flip the published flag of a video owned by the current user.

    When the video becomes published, also fires the channel webhook,
    creates one in-app notification per channel subscriber and sends
    subscription e-mails. Replies 'OK', or aborts 500 (not found /
    not owner) or 401 (anonymous).
    """
    sysSettings = settings.settings.query.first()
    if current_user.is_authenticated:
        videoID = int(message['videoID'])
        # Scoped to owningUser so users can only toggle their own videos.
        videoQuery = RecordedVideo.RecordedVideo.query.filter_by(owningUser=current_user.id, id=videoID).first()
        if videoQuery is not None:
            newState = not videoQuery.published
            videoQuery.published = newState
            # Fall back to the site-wide placeholder when the channel has no image.
            if videoQuery.channel.imageLocation is None:
                channelImage = (sysSettings.siteProtocol + sysSettings.siteAddress + "/static/img/video-placeholder.jpg")
            else:
                channelImage = (sysSettings.siteProtocol + sysSettings.siteAddress + "/images/" + videoQuery.channel.imageLocation)
            # Side effects only fire on the unpublished -> published transition.
            if newState is True:
                # Webhook trigger 6: "video published" (see webhookFunc.runWebhook).
                webhookFunc.runWebhook(videoQuery.channel.id, 6, channelname=videoQuery.channel.channelName,
                                       channelurl=(sysSettings.siteProtocol + sysSettings.siteAddress + "/channel/" + str(videoQuery.channel.id)),
                                       channeltopic=templateFilters.get_topicName(videoQuery.channel.topic),
                                       channelimage=channelImage, streamer=templateFilters.get_userName(videoQuery.channel.owningUser),
                                       channeldescription=str(videoQuery.channel.description), videoname=videoQuery.channelName,
                                       videodate=videoQuery.videoDate, videodescription=str(videoQuery.description),
                                       videotopic=templateFilters.get_topicName(videoQuery.topic),
                                       videourl=(sysSettings.siteProtocol + sysSettings.siteAddress + '/play/' + str(videoQuery.id)),
                                       videothumbnail=(sysSettings.siteProtocol + sysSettings.siteAddress + '/videos/' + str(videoQuery.thumbnailLocation)))
                subscriptionQuery = subscriptions.channelSubs.query.filter_by(channelID=videoQuery.channel.id).all()
                for sub in subscriptionQuery:
                    # Create Notification for Channel Subs
                    # (committed per subscriber so partial progress survives an error)
                    newNotification = notifications.userNotification(templateFilters.get_userName(videoQuery.channel.owningUser) + " has posted a new video to " + videoQuery.channel.channelName + " titled " + videoQuery.channelName, '/play/' + str(videoQuery.id), "/images/" + str(videoQuery.channel.owner.pictureLocation), sub.userID)
                    db.session.add(newNotification)
                    db.session.commit()
                # E-mail blast to channel subscribers with an HTML body.
                subsFunc.processSubscriptions(videoQuery.channel.id, sysSettings.siteName + " - " + videoQuery.channel.channelName + " has posted a new video", "<html><body><img src='" +
                                              sysSettings.siteProtocol + sysSettings.siteAddress + sysSettings.systemLogo + "'><p>Channel " + videoQuery.channel.channelName + " has posted a new video titled <u>" +
                                              videoQuery.channelName + "</u> to the channel.</p><p>Click this link to watch<br><a href='" + sysSettings.siteProtocol + sysSettings.siteAddress + "/play/" +
                                              str(videoQuery.id) + "'>" + videoQuery.channelName + "</a></p>")
            db.session.commit()
            db.session.close()
            return 'OK'
        else:
            db.session.commit()
            db.session.close()
            return abort(500)
    else:
        db.session.commit()
        db.session.close()
        return abort(401)
@socketio.on('togglePublishedClip')
def togglePublishedClipSocketIO(message):
    """Flip the published flag of a clip owned by the current user.

    When the clip becomes published, creates one in-app notification per
    channel subscriber. Replies 'OK', or aborts 500 (not found / not
    owner) or 401 (anonymous).
    """
    if current_user.is_authenticated:
        clipID = int(message['clipID'])
        clipQuery = RecordedVideo.Clips.query.filter_by(id=clipID).first()
        # Ownership is checked through the parent recorded video.
        if clipQuery is not None and current_user.id == clipQuery.recordedVideo.owningUser:
            newState = not clipQuery.published
            clipQuery.published = newState
            # Notifications only fire on the unpublished -> published transition.
            if newState is True:
                subscriptionQuery = subscriptions.channelSubs.query.filter_by(channelID=clipQuery.recordedVideo.channel.id).all()
                for sub in subscriptionQuery:
                    # Create Notification for Channel Subs
                    # (committed per subscriber so partial progress survives an error)
                    newNotification = notifications.userNotification(templateFilters.get_userName(clipQuery.recordedVideo.owningUser) + " has posted a new clip to " +
                                                                     clipQuery.recordedVideo.channel.channelName + " titled " + clipQuery.clipName, '/clip/' +
                                                                     str(clipQuery.id), "/images/" + str(clipQuery.recordedVideo.channel.owner.pictureLocation), sub.userID)
                    db.session.add(newNotification)
                    db.session.commit()
            db.session.commit()
            db.session.close()
            return 'OK'
        else:
            db.session.commit()
            db.session.close()
            return abort(500)
    else:
        db.session.commit()
        db.session.close()
        return abort(401)
@socketio.on('editClip')
def changeClipMetadataSocketIO(message):
    """Socket.io handler: update a clip's name and description.

    Reads clipID, clipName and clipDescription from ``message``; replies
    ``'OK'`` on success, HTTP 500 on failure, HTTP 401 for anonymous users.
    NOTE(review): unlike the video handler, clipName is not passed through
    system.strip_html here — confirm whether that is intentional.
    """
    if not current_user.is_authenticated:
        db.session.commit()
        db.session.close()
        return abort(401)
    updated = videoFunc.changeClipMetadata(
        int(message['clipID']),
        message['clipName'],
        message['clipDescription'],
    )
    db.session.commit()
    db.session.close()
    if updated is True:
        return 'OK'
    return abort(500)
@socketio.on('deleteClip')
def deleteClipSocketIO(message):
    """Socket.io handler: delete a clip.

    Expects ``message['clipID']``; replies ``'OK'`` on success,
    HTTP 500 on failure and HTTP 401 for anonymous users.
    """
    if not current_user.is_authenticated:
        db.session.commit()
        db.session.close()
        return abort(401)
    clip_id = int(message['clipID'])
    deleted = videoFunc.deleteClip(clip_id)
    db.session.commit()
    db.session.close()
    return 'OK' if deleted is True else abort(500)
| 41.164444 | 335 | 0.617361 | 3.0625 |
c3505840fa0bc4b40a2801adea31678f4873e3c3
| 2,993 |
cs
|
C#
|
EWKT.Tests/Parsers/EWKTParserTests.cs
|
0xRCE/EWKT
|
a6d4fa0303421675d4e7ae8f51098e60496b4259
|
[
"MIT"
] | null | null | null |
EWKT.Tests/Parsers/EWKTParserTests.cs
|
0xRCE/EWKT
|
a6d4fa0303421675d4e7ae8f51098e60496b4259
|
[
"MIT"
] | null | null | null |
EWKT.Tests/Parsers/EWKTParserTests.cs
|
0xRCE/EWKT
|
a6d4fa0303421675d4e7ae8f51098e60496b4259
|
[
"MIT"
] | null | null | null |
using EWKT.Parsers;
using EWKT.Primitives;
using Microsoft.VisualStudio.TestTools.UnitTesting;
//using NUnit.Framework;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace EWKT.Tests.Parsers
{
    [TestClass]
    public class EWKTParserTests
    {
        // Converting a simple point should produce the concrete PointZ geometry.
        [TestMethod]
        public void Test_EWKTParser_RootGeometry()
        {
            var ewkt = "POINT (10 20)";
            var root = EWKTParser.Convert(ewkt);
            Assert.IsNotNull(root);
            Assert.IsInstanceOfType(root, typeof(PointZ));
        }
        // The raw parser must keep a negative X and a decimal Y as one coordinate set.
        [TestMethod]
        public void Test_EWKTParser_Point_Negative_Coordinate()
        {
            var ewkt = "POINT(-1 2.0)";
            var parser = EWKTParser.CreateParser(ewkt);
            var geom = parser.Parse();
            Assert.AreEqual("POINT", geom.Name);
            Assert.IsNotNull(geom);
            var coordinates = geom.Coordinates.ToList();
            Assert.AreEqual(1, coordinates.Count);
            Assert.AreEqual("-1 2.0", coordinates[0].Set);
        }
        // Convert() must surface the same negative/decimal values as typed numbers.
        [TestMethod]
        public void Test_EWKTParser_PointGeometry_Negative_Coordinate()
        {
            var ewkt = "POINT(-1 2.0)";
            var geom = EWKTParser.Convert(ewkt) as PointZ;
            Assert.IsNotNull(geom);
            var point = geom.Coordinate;
            Assert.AreEqual(-1, point.X);
            Assert.AreEqual(2.0d, point.Y);
        }
        // A null input string converts to a null geometry rather than throwing.
        [TestMethod]
        public void Test_EWKTParser_Null()
        {
            var ewkt = (string)null;
            var geom = EWKTParser.Convert(ewkt);
            Assert.IsNull(geom);
        }
        // A POLYGON Z ring lives one level down: the ring's coordinates are on the
        // first child, and the parser normalises separators to ", ".
        [TestMethod]
        public void Test_EWKTParser_Simple_Polygon()
        {
            var ewkt = "POLYGON Z((30 10 1,40 40 1,20 40 1,10 20 1,30 10 1))";
            var parser = EWKTParser.CreateParser(ewkt);
            var geom = parser.Parse();
            Assert.AreEqual("POLYGON Z", geom.Name);
            Assert.IsNotNull(geom);
            var coordinates = geom.Children.First().Coordinates.ToList();
            Assert.AreEqual(1, coordinates.Count);
            Assert.AreEqual("30 10 1, 40 40 1, 20 40 1, 10 20 1, 30 10 1", coordinates[0].Set);
        }
        // A CURVEPOLYGON has no coordinates of its own; the CIRCULARSTRING child
        // carries the single coordinate set.
        [TestMethod]
        public void Test_EWKTParser_Complex_CurvePolygon()
        {
            var ewkt = "CURVEPOLYGON(CIRCULARSTRING(1 3, 3 5, 4 7, 7 3, 1 3))";
            var parser = EWKTParser.CreateParser(ewkt);
            var geom = parser.Parse();
            Assert.AreEqual("CURVEPOLYGON", geom.Name);
            Assert.IsNotNull(geom);
            var coordinates = geom.Coordinates.ToList();
            Assert.AreEqual(0, coordinates.Count);
            var child = geom.Children.FirstOrDefault();
            Assert.IsNotNull(child);
            coordinates = child.Coordinates.ToList();
            Assert.AreEqual(1, coordinates.Count);
            Assert.AreEqual("1 3, 3 5, 4 7, 7 3, 1 3", coordinates[0].Set);
        }
    }
}
| 29.93 | 95 | 0.571667 | 3.015625 |
f46e4ecb59fec9bec762e2ac163cdcaaaa6758e3
| 1,126 |
cs
|
C#
|
Scripts/Functions/Math.cs
|
M-T-Asagi/ScriptsUnityUtil
|
52d5897486d62ed3ed235f87053904c62d80e44a
|
[
"MIT"
] | null | null | null |
Scripts/Functions/Math.cs
|
M-T-Asagi/ScriptsUnityUtil
|
52d5897486d62ed3ed235f87053904c62d80e44a
|
[
"MIT"
] | null | null | null |
Scripts/Functions/Math.cs
|
M-T-Asagi/ScriptsUnityUtil
|
52d5897486d62ed3ed235f87053904c62d80e44a
|
[
"MIT"
] | null | null | null |
using UnityEngine;
namespace AsagiHandyScripts
{
static public class Math
{
static public Vector2Int GreatestCommonResolution(int maxOfPixels, Vector2Int originalResolution)
{
int gcd = GetGreatestCommonDivisor(originalResolution.x, originalResolution.y);
Vector2Int aspect = originalResolution;
aspect.x /= gcd;
aspect.y /= gcd;
float magni = Mathf.Sqrt((float)maxOfPixels / (float)(aspect.x * aspect.y));
return new Vector2Int(Mathf.CeilToInt(aspect.x * magni), Mathf.CeilToInt(aspect.y * magni));
}
static public int GetGreatestCommonDivisor(int a, int b)
{
int big = Mathf.Max(a, b);
int small = Mathf.Min(a, b);
if (small == 0)
return big;
return GetGreatestCommonDivisor(small, big % small);
}
static public int PowInt(int x, int y)
{
int result = 1;
for (int i = 0; i < y; i++)
{
result *= x;
}
return result;
}
}
}
| 28.15 | 105 | 0.539964 | 3.15625 |
e2f5e9c8a09680181641113cfaa9943a094941ba
| 2,609 |
py
|
Python
|
test/generate_tones.py
|
merlinran/acorn-precision-farming-rover
|
228bbeb537550df79ae57985c427975ffa828bcd
|
[
"Apache-2.0"
] | 143 |
2021-02-23T16:17:32.000Z
|
2022-03-30T09:42:27.000Z
|
test/generate_tones.py
|
Twisted-Fields/acorn-precision-farming-rover
|
228bbeb537550df79ae57985c427975ffa828bcd
|
[
"Apache-2.0"
] | 19 |
2021-05-13T19:03:21.000Z
|
2022-03-25T08:46:44.000Z
|
test/generate_tones.py
|
merlinran/acorn-precision-farming-rover
|
228bbeb537550df79ae57985c427975ffa828bcd
|
[
"Apache-2.0"
] | 17 |
2021-02-23T22:02:24.000Z
|
2022-03-20T15:12:20.000Z
|
# Generates the three notification sound files (complete/wait/error)
# used by the rover, via the third-party `tones` package.
from tones import SINE_WAVE, SAWTOOTH_WAVE
from tones.mixer import Mixer

# --- complete.wav -----------------------------------------------------
# Create mixer, set sample rate (Hz) and amplitude
mixer = Mixer(44100, 0.5)
# One monophonic sawtooth track with vibrato; attack/decay are initial
# values and can be changed later (see documentation for tones.Mixer).
mixer.create_track(0, SAWTOOTH_WAVE, vibrato_frequency=20.0,
                   vibrato_variance=30.0, attack=0.01, decay=0.1)
#mixer.create_track(1, SINE_WAVE, attack=0.01, decay=0.1)
# Add a 1-second tone on track 0, sliding pitch from c#5 up to f#
mixer.add_note(0, note='c#', octave=5, duration=1.0, endnote='f#')
# mixer.add_note(0, note='f#', octave=5, duration=1.0, endnote='g#')
# Mix all tracks into a single list of samples and write to .wav file
mixer.write_wav('complete.wav')

# --- wait.wav ---------------------------------------------------------
# Create mixer, set sample rate and amplitude
mixer = Mixer(44100, 0.5)
# One sine track this time (the sawtooth variant is kept for reference).
#mixer.create_track(0, SAWTOOTH_WAVE, vibrato_frequency=7.0, vibrato_variance=30.0, attack=0.01, decay=0.1)
mixer.create_track(0, SINE_WAVE, attack=0.01, decay=0.1)
# Short 0.25-second a5 blip with 7 Hz vibrato (start and end note equal)
mixer.add_note(0, note='a', octave=5, duration=0.25,
               endnote='a', vibrato_frequency=7.0)
# mixer.add_note(0, note='c', octave=5, duration=1.0, endnote='a')
# Mix all tracks into a single list of samples and write to .wav file
mixer.write_wav('wait.wav')

# --- error.wav --------------------------------------------------------
# Create mixer, set sample rate and amplitude
mixer = Mixer(44100, 0.5)
# One sawtooth track with slow (2 Hz) vibrato applied on the note below.
mixer.create_track(0, SAWTOOTH_WAVE, vibrato_frequency=7.0,
                   vibrato_variance=30.0, attack=0.01, decay=0.1)
#mixer.create_track(0, SINE_WAVE, attack=0.01, decay=0.1)
# Long 4-second tone sliding from f5 to g, for an unmistakable alarm
mixer.add_note(0, note='f', octave=5, duration=4.0,
               endnote='g', vibrato_frequency=2.0)
# mixer.add_note(0, note='c', octave=5, duration=1.0, endnote='a')
# Mix all tracks into a single list of samples and write to .wav file
mixer.write_wav('error.wav')
| 41.412698 | 107 | 0.721732 | 3.375 |
e2da828e91dc9f0d6b0dadf185a42c8037de7454
| 4,888 |
py
|
Python
|
preprocessing/functions/data_formatter.py
|
andygubser/data_visualisation
|
406b595025f29e48bac7effdb9e9dfb19809f371
|
[
"MIT"
] | null | null | null |
preprocessing/functions/data_formatter.py
|
andygubser/data_visualisation
|
406b595025f29e48bac7effdb9e9dfb19809f371
|
[
"MIT"
] | null | null | null |
preprocessing/functions/data_formatter.py
|
andygubser/data_visualisation
|
406b595025f29e48bac7effdb9e9dfb19809f371
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
pd.options.display.max_rows = 500
class DataFormatter:
    """Pipeline of classmethods that cleans a raw attack-record dataframe.

    The column names (``case_number``, ``fatal``, ``type`` with values
    like "Unprovoked") suggest the Global Shark Attack File export —
    TODO confirm against the actual data source.

    All regex patterns are raw strings; the previous plain-string
    ``'\\d'`` escapes triggered invalid-escape warnings on Python 3.6+.
    """

    @classmethod
    def format_data(cls, df):
        """Run every formatting step in order and return the cleaned frame
        with a fresh integer index."""
        df = cls._format_columns(df)
        df = cls._format_booleans(df)
        df = cls._format_numericals(df)
        df = cls._format_rows(df)
        df = cls._format_datetimes(df)
        df = cls._format_age(df)
        # df = cls._format_type(df)
        df = cls._format_fatal(df)
        return df.reset_index(drop=True)

    @classmethod
    def _format_columns(cls, df):
        """Drop all-NaN and 'unnamed' columns, snake_case the names and
        rename the awkward ``fatal(y/n)`` column to ``fatal``."""
        df.dropna(how="all", axis="columns", inplace=True)
        df.columns = map(cls._camel_to_snake, df.columns)
        unnamed_cols = df.columns[df.columns.str.contains('unnamed')].tolist()
        df.drop(unnamed_cols, axis="columns", inplace=True)
        df.rename(columns={"fatal(y/n)": "fatal"}, inplace=True)
        # df = df.reindex(sorted(df.columns), axis="columns")
        return df

    @classmethod
    def _format_rows(cls, df):
        """Drop rows without a case number (0 is treated as missing)."""
        df["case_number"] = df["case_number"].replace(0, np.nan)
        df = df[~df["case_number"].isna()]
        return df

    @classmethod
    def _camel_to_snake(cls, single_column_name):
        """Convert one column name to snake_case; spaces are removed and
        dots become underscores first."""
        import re
        single_column_name = single_column_name.replace(" ", "").replace(".", "_")
        single_column_name = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', single_column_name)
        single_column_name = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', single_column_name).lower()
        return single_column_name

    @classmethod
    def _format_booleans(cls, df):
        """Map the literal strings 'false'/'true' to real booleans."""
        return df.replace(["false", "true"], [False, True])

    @classmethod
    def _format_numericals(cls, df):
        """Downcast param_*/kpi_*/recipe_* columns to numeric dtypes.

        NOTE(review): these prefixes look copied from a different
        (process-data) project and may never match here — confirm.
        """
        params = list([df.columns[df.columns.str.startswith("param_")],
                       df.columns[df.columns.str.startswith("kpi_")],
                       df.columns[df.columns.str.startswith("recipe_")]
                       ])
        params = [item for sublist in params for item in sublist]
        params = [x for x in params if 'timestamp' not in x]
        # errors="ignore" keeps non-convertible columns unchanged.
        df[params] = df[params].apply(pd.to_numeric, downcast="signed", errors="ignore")
        return df

    @classmethod
    def _format_date(cls, df):
        """Parse ``date`` into ``date_prep``, stripping the qualifiers
        'reported' and 'before' that the raw data sprinkles in."""
        df["date_prep"] = df["date"].str.lower()
        df["date_prep"] = df["date_prep"].str.strip()
        df["date_prep"] = df["date_prep"].str.replace("reported", "")
        df["date_prep"] = df["date_prep"].str.replace("before", "")
        df["date_prep"] = pd.to_datetime(df["date_prep"], errors="ignore")
        return df

    @classmethod
    def _format_year(cls, df):
        """Build ``year_prep``: prefer the explicit ``year`` column, then a
        4-digit year from ``date``, then one from ``case_number``."""
        df["year_prep"] = df["year"].replace(0.0, np.nan)
        df["year_from_casenumber"] = df["case_number"].str.findall(r'\d{4}').str[0]
        df["year_from_casenumber"] = pd.to_numeric(df["year_from_casenumber"])
        # Mask obviously bogus (pre-1000) "years" found in case numbers.
        df["year_from_casenumber"] = df["year_from_casenumber"].mask(df["year_from_casenumber"] < 1000)
        df["year_from_date"] = df["date"].str.findall(r'\d{4}').str[-1]
        df["year_from_date"] = pd.to_numeric(df["year_from_date"])
        df.loc[df["year_prep"].isna(), 'year_prep'] = df["year_from_date"]
        df.loc[df["year_prep"].isna(), 'year_prep'] = df["year_from_casenumber"]
        return df

    @classmethod
    def _format_datetimes(cls, df):
        """Run the date and year normalisation steps."""
        df = cls._format_date(df)
        df = cls._format_year(df)
        return df

    @classmethod
    def _format_age(cls, df):
        """Extract the first number found in ``age`` into ``age_prep``."""
        df["age_prep"] = df["age"].astype(str).str.findall(r'\d+').str[0]
        df["age_prep"] = pd.to_numeric(df["age_prep"], errors="ignore")
        # df.loc[df["age_prep"].str.len() == 0, "age_prep"]
        return df

    @classmethod
    def _format_type(cls, df):
        """Encode ``type`` as 0 (Unprovoked) / 1 (Provoked) in ``type_cat``."""
        type_dict = dict(zip(["Unprovoked", "Provoked"], [0, 1]))
        df["type_cat"] = df["type"].replace(type_dict)
        return df

    @classmethod
    def _format_fatal(cls, df):
        """Encode ``fatal`` as 0/1 in ``fatal_cat``; the stray 'm' values in
        the raw data are treated as 'n' (non-fatal)."""
        df["fatal_cat"] = df["fatal"].str.strip().str.lower().str.replace("m", "n").replace(["n", "y"], [0, 1])
        return df

    # @classmethod
    # def _unstack_list(cls, df, col_to_concatenate):
    #     cols_all = set(df.columns)
    #     cols_to_repeat = cols_all.difference(col_to_concatenate)
    #     number_of_lists_per_row = df[col_to_concatenate].str.len().fillna(1)
    #
    #     df_unstacked = pd.DataFrame({
    #         col: np.repeat(df[col].values, number_of_lists_per_row) for col in cols_to_repeat
    #     }).assign(**{col_to_concatenate: np.concatenate(df[col_to_concatenate].values)})[df.columns.tolist()]
    #
    #     return df
| 38.488189 | 111 | 0.588175 | 3.125 |
2867fd5310ab9b6d2d2980404e2b505963bcc694
| 1,105 |
swift
|
Swift
|
RickAndMortyApp/NetworkService/RMProvider.swift
|
zontag/RickAndMortyApp
|
67bc4aea449264ebb260324f869a6827378eabb9
|
[
"MIT"
] | null | null | null |
RickAndMortyApp/NetworkService/RMProvider.swift
|
zontag/RickAndMortyApp
|
67bc4aea449264ebb260324f869a6827378eabb9
|
[
"MIT"
] | null | null | null |
RickAndMortyApp/NetworkService/RMProvider.swift
|
zontag/RickAndMortyApp
|
67bc4aea449264ebb260324f869a6827378eabb9
|
[
"MIT"
] | null | null | null |
import Foundation
import Moya
import ReactiveSwift
import ReactiveMoya
/// Thin facade over the Moya-based Rick & Morty API service.
class RMProvider {
    /// Failure surfaced to callers; the underlying transport error is
    /// collapsed into a single user-presentable case.
    enum Failure: LocalizedError {
        case fetchFailure
        var localizedDescription: String {
            switch self {
            case .fetchFailure:
                return "Sorry, something really strange happened. 🤯"
            }
        }
    }
    /// Shared singleton instance (init is private).
    static let shared = RMProvider()
    // Resolved lazily from the app-wide dependency container.
    private lazy var rmService: MoyaProvider<RMService> = appEnvironment.resolve()
    private init () { }
    /// Fetches one page of characters matching the given filters.
    ///
    /// - Returns: a producer emitting `(characters, totalPageCount)`;
    ///   any transport/decoding error is mapped to `Failure.fetchFailure`.
    func fetchCharacters(
        page: Int,
        name: String,
        status: String,
        gender: String,
        species: String)
    -> SignalProducer<([Character], Int), Failure> {
        rmService.reactive.request(.characters(
            page: page,
            name: name,
            status: status,
            gender: gender,
            species: species))
            .map(Response<Character>.self)
            .map { ($0.results, $0.info.pages) }
            .mapError { _ in return Failure.fetchFailure }
    }
}
| 25.697674 | 82 | 0.534842 | 3.015625 |
144e8563d356054ea2b723034e4066aa0ed2013d
| 10,574 |
ts
|
TypeScript
|
src/tokens/parsers.ts
|
functional-script/fscript
|
ff25134cc9a7f4322223f3290a15049b7c30759a
|
[
"MIT"
] | null | null | null |
src/tokens/parsers.ts
|
functional-script/fscript
|
ff25134cc9a7f4322223f3290a15049b7c30759a
|
[
"MIT"
] | null | null | null |
src/tokens/parsers.ts
|
functional-script/fscript
|
ff25134cc9a7f4322223f3290a15049b7c30759a
|
[
"MIT"
] | null | null | null |
import { TokenParser, Token, KEYWORD_LITERAL } from './types'
import { TokenList } from './list'
import { TokenError } from './error'
import { CompilerOptions } from '../options'
/**
* Allows to parse a new line token
*/
/**
 * Tokenizes a line break. Also claims whitespace-only remainders so the
 * lexer can terminate cleanly.
 */
export class NewLineToken implements TokenParser {
  constructor(options: CompilerOptions) {}

  get re(): RegExp {
    return /^(\r|\n)/
  }

  static get ID(): string {
    return 'NEW_LINE'
  }

  public supports(code: string): boolean {
    return code.trim() === '' || this.re.test(code)
  }

  public parse(code: string, list: TokenList): Token {
    const previous = list.hasLast ? list.last : null
    // Defaults for the very first token of the stream.
    let line = 1
    let column = 0
    if (previous) {
      if (previous.name === NewLineToken.ID) {
        // Consecutive newline: advance the line, reset the column.
        line = previous.position.line + 1
        column = 0
      } else {
        // First newline on this line: stay on it, just past the last token.
        line = previous.position.line
        column = previous.position.end + 1
      }
    }
    return {
      name: NewLineToken.ID,
      value: '\n',
      rawValue: '\n',
      position: { line, start: column, end: column },
    }
  }

  public substract(code: string, tokens: TokenList): string {
    // Strip the matched line break, or collapse a whitespace-only tail.
    return this.re.test(code) ? code.replace(this.re, '') : code.trim()
  }
}
/**
* Allow to parse an indentation
*/
/**
 * Tokenizes one indentation unit (N spaces or one tab, per options),
 * but only at line starts: after a NEW_LINE, another INDENT, or at the
 * very beginning of the stream.
 */
export class IndentToken implements TokenParser {
  private kind: CompilerOptions['indentKind']
  private size: CompilerOptions['indentSize']

  constructor(options: CompilerOptions) {
    this.kind = options.indentKind
    this.size = options.indentSize
  }

  static get ID(): string {
    return 'INDENT'
  }

  /** Regexp matching exactly one indentation unit at the string start. */
  private unitRe(): RegExp {
    if (this.kind === 'space') {
      return new RegExp(`^${' '.repeat(this.size)}`)
    }
    return new RegExp('^\t')
  }

  public supports(code: string, list: TokenList): boolean {
    const re = this.unitRe()
    if (!list.hasLast) {
      // Stream start: only valid when nothing has been emitted yet.
      return re.test(code) && list.length === 0
    }
    const previous = list.last
    return (
      re.test(code) &&
      (previous.name === NewLineToken.ID || previous.name === IndentToken.ID)
    )
  }

  public parse(code: string, list: TokenList): Token {
    const match = code.match(this.unitRe())
    const position = list.calculateNextPostion(match ? match[0] : { length: 0 })
    if (!match) {
      throw new TokenError(IndentToken.ID, code, position)
    }
    return {
      name: IndentToken.ID,
      // The token value is the indent's character width.
      value: match[0].length,
      rawValue: match[0],
      position,
    }
  }

  public substract(code: string): string {
    return code.replace(this.unitRe(), '')
  }
}
/**
* Allows to parse spaces
*/
/**
 * Tokenizes a run of spaces; a whole run collapses into a single
 * one-space token while rawValue keeps the full run.
 */
export class SpaceToken implements TokenParser {
  constructor(options: CompilerOptions) {}

  static get ID(): string {
    return 'SPACE'
  }

  public supports(code: string, list: TokenList): boolean {
    return code.startsWith(' ')
  }

  public parse(code: string, list: TokenList): Token {
    const run = code.match(/^ +/)
    const position = list.calculateNextPostion(run ? run[0] : { length: 0 })
    if (run === null) {
      throw new TokenError(SpaceToken.ID, code, position)
    }
    return {
      name: SpaceToken.ID,
      value: ' ',
      rawValue: run[0],
      position,
    }
  }

  public substract(line: string): string {
    return line.replace(/^ +/, '')
  }
}
/**
* Parse an fscript keyword
*/
/**
 * Parse an fscript keyword.
 */
export class KeywordToken implements TokenParser {
  constructor(options: CompilerOptions) {}

  static get ID(): string {
    return 'KEYWORD'
  }

  // The trailing lookahead rejects a keyword immediately followed by an
  // identifier character ([a-zA-Z0-9_-], see IdentifierToken): without
  // it, identifiers such as "define" or "format" were truncated into
  // the keywords "def" / "for" plus a dangling identifier.
  private re =
    /^(def|var|let|const|class|type|interface|if|else|for|while|do|throw|new|async|await|yield|return|then|import|export|from|as|in|with|function|and|or)(?![a-zA-Z0-9_-])/

  public supports(code: string): boolean {
    return this.re.test(code)
  }

  /**
   * Builds the KEYWORD token from the start of `code`.
   * @throws TokenError when `code` does not start with a keyword.
   */
  public parse(code: string, list: TokenList): Token {
    let match = code.match(this.re)
    let position = list.calculateNextPostion(match ? match[1] : { length: 0 })
    if (!match) {
      throw new TokenError(KeywordToken.ID, code, position)
    }
    return {
      name: KeywordToken.ID,
      value: match[1],
      rawValue: match[1],
      position,
    }
  }

  public substract(code: string): string {
    return code.replace(this.re, '')
  }
}
/**
* Parse an fscript separator
*/
/**
 * Tokenizes the argument/element separator "," .
 */
export class SeparatorToken implements TokenParser {
  constructor(options: CompilerOptions) {}

  static get ID(): string {
    return 'SEPARATOR'
  }

  private re = /^(,)/

  public supports(code: string): boolean {
    return this.re.test(code)
  }

  public parse(code: string, list: TokenList): Token {
    const found = code.match(this.re)
    const position = list.calculateNextPostion(found ? found[1] : { length: 0 })
    if (found === null) {
      throw new TokenError(SeparatorToken.ID, code, position)
    }
    return {
      name: SeparatorToken.ID,
      value: found[1],
      rawValue: found[1],
      position,
    }
  }

  public substract(code: string): string {
    return code.replace(this.re, '')
  }
}
/**
* Parse any fscript operator
*/
/**
 * Parse any fscript operator.
 */
export class OperatorToken implements TokenParser {
  constructor(options: CompilerOptions) {}

  static get ID(): string {
    return 'OPERATOR'
  }

  // Alternation order matters: longer operators must come first.
  // Previously "!=" was listed before "!==", so "!==" could never match
  // as a whole (it lexed as "!=" + "="); now ordered "!==|!=|!".
  // A duplicate "=>" alternative was also removed (it was unreachable).
  private re =
    /^(:|=>|\+|\->|\-|\*|\/|\.\.\.|\.\.|\.|<=|===|==|=|is|and|or|not|!==|!=|!|gte|lte|gt|lt|eq)/

  public supports(code: string): boolean {
    return this.re.test(code)
  }

  /**
   * Builds the OPERATOR token from the start of `code`.
   * @throws TokenError when `code` does not start with an operator.
   */
  public parse(code: string, list: TokenList): Token {
    let match = code.match(this.re)
    let position = list.calculateNextPostion(match ? match[1] : { length: 0 })
    if (!match) {
      throw new TokenError(OperatorToken.ID, code, position)
    }
    return {
      name: OperatorToken.ID,
      value: match[1],
      rawValue: match[1],
      position,
    }
  }

  public substract(code: string): string {
    return code.replace(this.re, '')
  }
}
/**
* Parse an fscript identifier
*/
/**
 * Tokenizes an identifier: a letter followed by letters, digits,
 * underscores or dashes.
 */
export class IdentifierToken implements TokenParser {
  constructor(options: CompilerOptions) {}

  static get ID(): string {
    return 'IDENTIFIER'
  }

  private re = /^([a-zA-Z][a-zA-Z0-9_-]*)/

  public supports(code: string): boolean {
    return this.re.test(code)
  }

  public parse(code: string, list: TokenList): Token {
    const found = code.match(this.re)
    const position = list.calculateNextPostion(found ? found[1] : { length: 0 })
    if (found === null) {
      throw new TokenError(IdentifierToken.ID, code, position)
    }
    return {
      name: IdentifierToken.ID,
      value: found[1],
      rawValue: found[1],
      position,
    }
  }

  public substract(code: string): string {
    return code.replace(this.re, '')
  }
}
/**
* Parse an fscript literral
*/
/**
 * Parse an fscript literal: a number, a quoted string ("", '' or ``),
 * or one of the keyword literals from KEYWORD_LITERAL (project constant
 * — presumably true/false/null-style words; confirm in ./types).
 */
export class LiteralToken implements TokenParser {
  private re: RegExp

  constructor(options: CompilerOptions) {
    // Built at runtime because the keyword-literal list is data-driven.
    // NOTE(review): "[0-9.]+" also accepts "." and "1.2.3" — confirm
    // whether later validation rejects malformed numbers.
    this.re = new RegExp(
      `^(([0-9.]+)|("[^"]*")|('[^']*')|(\`[^\`]*\`)|(${KEYWORD_LITERAL.join(
        '|',
      )}))`,
    )
  }

  static get ID(): string {
    return 'LITTERAL'
  }

  public supports(code: string): boolean {
    return this.re.test(code)
  }

  /**
   * Builds the LITTERAL token from the start of `code`.
   * @throws TokenError when `code` does not start with a literal.
   */
  public parse(code: string, list: TokenList): Token {
    let match = code.match(this.re)
    let position = list.calculateNextPostion(match ? match[1] : { length: 0 })
    if (!match) {
      throw new TokenError(LiteralToken.ID, code, position)
    }
    return {
      name: LiteralToken.ID,
      value: match[1],
      rawValue: match[1],
      position,
    }
  }

  public substract(code: string): string {
    return code.replace(this.re, '')
  }
}
/**
* Parse an fscript group using parenthesis
*/
/**
 * Tokenizes parenthesis groups: "(" → GROUP_START, ")" → GROUP_END.
 */
export class GroupToken implements TokenParser {
  constructor(options: CompilerOptions) {}

  static get ID_START(): string {
    return 'GROUP_START'
  }

  static get ID_END(): string {
    return 'GROUP_END'
  }

  private re = /^(\)|\()/

  public supports(code: string): boolean {
    return this.re.test(code)
  }

  public parse(code: string, list: TokenList): Token {
    const found = code.match(this.re)
    const position = list.calculateNextPostion(found ? found[1] : { length: 0 })
    if (found === null) {
      throw new TokenError(
        `${GroupToken.ID_START}|${GroupToken.ID_END}`,
        code,
        position,
      )
    }
    const isClosing = found[1] === ')'
    return {
      name: isClosing ? GroupToken.ID_END : GroupToken.ID_START,
      value: found[1],
      rawValue: found[1],
      position,
    }
  }

  public substract(code: string): string {
    return code.replace(this.re, '')
  }
}
/**
* Parse instruction block or objet group definitions
* "{}"
*/
/**
 * Tokenizes curly-brace blocks: "{" → BLOCK_START, "}" → BLOCK_END.
 */
export class BlockToken implements TokenParser {
  constructor(options: CompilerOptions) {}

  static get ID_START(): string {
    return 'BLOCK_START'
  }

  static get ID_END(): string {
    return 'BLOCK_END'
  }

  private re = /^(\}|\{)/

  public supports(code: string): boolean {
    return this.re.test(code)
  }

  public parse(code: string, list: TokenList): Token {
    const found = code.match(this.re)
    const position = list.calculateNextPostion(found ? found[1] : { length: 0 })
    if (found === null) {
      throw new TokenError(
        `${BlockToken.ID_START}|${BlockToken.ID_END}`,
        code,
        position,
      )
    }
    const isClosing = found[1] === '}'
    return {
      name: isClosing ? BlockToken.ID_END : BlockToken.ID_START,
      value: found[1],
      rawValue: found[1],
      position,
    }
  }

  public substract(code: string): string {
    return code.replace(this.re, '')
  }
}
/**
* Parse array bracket syntax "[]"
*/
/**
 * Tokenizes square brackets: "[" → ARRAY_START, "]" → ARRAY_END.
 */
export class ArrayToken implements TokenParser {
  constructor(options: CompilerOptions) {}

  static get ID_START(): string {
    return 'ARRAY_START'
  }

  static get ID_END(): string {
    return 'ARRAY_END'
  }

  private re = /^(\]|\[)/

  public supports(code: string): boolean {
    return this.re.test(code)
  }

  public parse(code: string, list: TokenList): Token {
    const found = code.match(this.re)
    const position = list.calculateNextPostion(found ? found[1] : { length: 0 })
    if (found === null) {
      throw new TokenError(
        `${ArrayToken.ID_START}|${ArrayToken.ID_END}`,
        code,
        position,
      )
    }
    const isClosing = found[1] === ']'
    return {
      name: isClosing ? ArrayToken.ID_END : ArrayToken.ID_START,
      value: found[1],
      rawValue: found[1],
      position,
    }
  }

  public substract(code: string): string {
    return code.replace(this.re, '')
  }
}
| 21.712526 | 154 | 0.597882 | 3.234375 |
ec6f0dddc3a4c8a09f5ac89ce0a6de2c5f94d1e7
| 10,058 |
lua
|
Lua
|
tests/lutl_assert_tests.lua
|
Saend/cutl
|
31c091ec3155185a7764682654ad9ed3932f6267
|
[
"MIT"
] | null | null | null |
tests/lutl_assert_tests.lua
|
Saend/cutl
|
31c091ec3155185a7764682654ad9ed3932f6267
|
[
"MIT"
] | null | null | null |
tests/lutl_assert_tests.lua
|
Saend/cutl
|
31c091ec3155185a7764682654ad9ed3932f6267
|
[
"MIT"
] | null | null | null |
require('tests/tests')
local T = {}
-- ASSERT_TRUE
-- Each case runs a single assertion inside a nested child runner (fix.lutl,
-- presumably built by fixture_setup from tests/tests — verify there) and then
-- inspects the child's counters from the outer runner (lutl):
--   get_failed() == 0 -> the assertion passed inside the child,
--   get_failed() == 1 -> it was recorded as exactly one failure,
--   get_error() falsy -> the failure was a clean assert, not a runtime error.
function T.assert_true_true_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_true(true)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
-- In Lua every value except false and nil is truthy, so 0, strings,
-- tables and userdata must all satisfy assert_true.
function T.assert_true_number_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_true(0)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_true_string_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_true('foo')
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_true_table_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_true({})
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_true_userdata_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_true(lutl)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
-- The two falsy values (false and nil) must make assert_true fail.
function T.assert_true_false_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_true(false)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_true_nil_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_true(nil)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
-- ASSERT_FALSE
-- Mirror of the suite above: only false and nil satisfy assert_false;
-- every truthy value must be counted as a failure.
function T.assert_false_false_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_false(false)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_false_nil_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_false(nil)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_false_true_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_false(true)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
-- Note: 0 is truthy in Lua (unlike C), so assert_false(0) must fail.
function T.assert_false_number_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_false(0)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_false_string_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_false('foo')
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_false_table_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_false({})
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_false_userdata_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_false(lutl)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
-- ASSERT_EQUAL
-- assert_equal follows Lua's == semantics: numbers/strings compare by value,
-- userdata by identity, and there is no type coercion (36 ~= '36').
function T.assert_equal_numbers_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_equal(10, 6+4)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_equal_strings_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_equal('foobar', 'foo'..'bar')
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_equal_userdata_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_equal(lutl, lutl)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
-- Tables compare by identity, not structure: two fresh empty tables
-- are different objects, so this must be counted as a failure.
function T.assert_equal_tables_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_equal({}, {})
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_equal_numbers2_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_equal(11, 6+4)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_equal_strings2_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_equal('foobar', 'Foo'..'bar')
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_equal_number_string_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_equal(36, '36')
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_equal_number_bool_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_equal(0, false)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
-- nil and false are both falsy but are distinct values under ==.
function T.assert_equal_nil_false_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_equal(nil, false)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
-- ASSERT_NIL
-- Only nil (including reads of undefined globals, which yield nil)
-- may satisfy assert_nil; false and every other value must fail.
function T.assert_nil_nil_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_nil(nil)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
-- 'something' is an intentionally undefined global: reading it gives nil.
function T.assert_nil_undefined_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_nil(something)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_nil_false_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_nil(false)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_nil_number_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_nil(0)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_nil_string_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_nil('foo')
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_nil_table_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_nil({})
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_nil_userdata_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_nil(lutl)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
-- ASSERT_NOT_NIL
-- Exact complement of assert_nil: any non-nil value (even false) passes,
-- nil and undefined globals fail.
function T.assert_notnil_false_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_notnil(false)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_notnil_number_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_notnil(0)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_notnil_string_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_notnil('foo')
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_notnil_table_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_notnil({})
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_notnil_userdata_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_notnil(lutl)
	end)
	-- Asserts
	lutl:assert_equal(fix.lutl:get_failed(), 0)
end
function T.assert_notnil_nil_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_notnil(nil)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
function T.assert_notnil_undefined_test(lutl, fix)
	-- Function under test
	fix.lutl:run(nil, function(lutl)
		lutl:assert_notnil(something)
	end)
	-- Asserts
	lutl:assert_false(fix.lutl:get_error())
	lutl:assert_equal(fix.lutl:get_failed(), 1)
end
-- ASSERT SUITE
-- Suite entry point: the module returns this function so the runner can
-- register every case. fixture_setup/fixture_clean come from tests/tests
-- (required at the top of the file) and are presumed to create and tear
-- down the child runner stored in fix.lutl — verify against that file.
return function(lutl)
	lutl:at_start(fixture_setup)
	lutl:at_end(fixture_clean)
	lutl:test(T, 'assert_true_true_test')
	lutl:test(T, 'assert_true_number_test')
	lutl:test(T, 'assert_true_string_test')
	lutl:test(T, 'assert_true_table_test')
	lutl:test(T, 'assert_true_userdata_test')
	lutl:test(T, 'assert_true_false_test')
	lutl:test(T, 'assert_true_nil_test')
	lutl:test(T, 'assert_false_false_test')
	lutl:test(T, 'assert_false_nil_test')
	lutl:test(T, 'assert_false_true_test')
	lutl:test(T, 'assert_false_number_test')
	lutl:test(T, 'assert_false_string_test')
	lutl:test(T, 'assert_false_table_test')
	lutl:test(T, 'assert_false_userdata_test')
	lutl:test(T, 'assert_equal_numbers_test')
	lutl:test(T, 'assert_equal_strings_test')
	lutl:test(T, 'assert_equal_userdata_test')
	lutl:test(T, 'assert_equal_tables_test')
	lutl:test(T, 'assert_equal_numbers2_test')
	lutl:test(T, 'assert_equal_strings2_test')
	lutl:test(T, 'assert_equal_number_string_test')
	lutl:test(T, 'assert_equal_number_bool_test')
	lutl:test(T, 'assert_equal_nil_false_test')
	lutl:test(T, 'assert_nil_nil_test')
	lutl:test(T, 'assert_nil_undefined_test')
	lutl:test(T, 'assert_nil_false_test')
	lutl:test(T, 'assert_nil_number_test')
	lutl:test(T, 'assert_nil_string_test')
	lutl:test(T, 'assert_nil_table_test')
	lutl:test(T, 'assert_nil_userdata_test')
	lutl:test(T, 'assert_notnil_nil_test')
	lutl:test(T, 'assert_notnil_undefined_test')
	lutl:test(T, 'assert_notnil_false_test')
	lutl:test(T, 'assert_notnil_number_test')
	lutl:test(T, 'assert_notnil_string_test')
	lutl:test(T, 'assert_notnil_table_test')
	lutl:test(T, 'assert_notnil_userdata_test')
end
| 21.537473 | 53 | 0.747664 | 3.203125 |
e222e3ee992c49276a838ad051cb0ebb5f371a0d
| 1,618 |
py
|
Python
|
display_ssd1306.py
|
mkvenkit/simple_audio_pi
|
826d8ef9adfd480ac7676b34e10b8027441fe79c
|
[
"MIT"
] | 9 |
2021-02-20T02:54:35.000Z
|
2022-02-15T16:36:49.000Z
|
display_ssd1306.py
|
mkvenkit/simple_audio_pi
|
826d8ef9adfd480ac7676b34e10b8027441fe79c
|
[
"MIT"
] | null | null | null |
display_ssd1306.py
|
mkvenkit/simple_audio_pi
|
826d8ef9adfd480ac7676b34e10b8027441fe79c
|
[
"MIT"
] | 1 |
2021-09-02T09:58:52.000Z
|
2021-09-02T09:58:52.000Z
|
"""
display_ssd1306.py
Helper class for Adafruit_SSD1306. Displays text on 128x32 OLED display.
Author: Mahesh Venkitachalam
Website: electronut.in
"""
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
class SSD1306_Display:
    """Thin wrapper around Adafruit_SSD1306 for a 128x32 OLED over I2C.

    Keeps an off-screen PIL image plus a draw handle, and pushes the image
    to the panel on every show_txt() call.
    """
    def __init__(self):
        # 128x32 display with hardware I2C:
        # rst=None because this board has no reset pin wired.
        self.disp = Adafruit_SSD1306.SSD1306_128_32(rst=None)
        # Initialize library.
        self.disp.begin()
        # Clear display.
        self.disp.clear()
        self.disp.display()
        # Create blank image for drawing.
        # Make sure to create image with mode '1' for 1-bit color.
        self.image = Image.new('1', (self.disp.width, self.disp.height))
        # Get drawing object to draw on image.
        self.draw = ImageDraw.Draw(self.image)
        # Draw a black filled box to clear the image.
        self.draw.rectangle((0,0,self.disp.width,self.disp.height), outline=0, fill=0)
        # Load default font.
        self.font = ImageFont.load_default()
    def show_txt(self, x, y, str_data, clear_display):
        """Draw str_data at pixel (x, y) and push the frame to the panel.

        x, y          -- top-left pixel of the text (origin is top-left).
        str_data      -- text to render with the default PIL font.
        clear_display -- when truthy, blank the whole buffer first;
                         otherwise the new text is drawn over prior content.
        """
        # clear old data if flag set
        if clear_display:
            # Draw a black filled box to clear the image.
            self.draw.rectangle((0,0,self.disp.width, self.disp.height),
                                outline=0, fill=0)
        self.draw.text((x,y), str_data, font=self.font, fill=255)
        # Display image.
        self.disp.image(self.image)
        self.disp.display()
| 33.020408 | 86 | 0.630408 | 3.0625 |
e280c8817629d452d98a7ce7731e4614c914e1a5
| 1,324 |
py
|
Python
|
dbdaora/sorted_set/entity.py
|
dutradda/sqldataclass
|
5c87a3818e9d736bbf5e1438edc5929a2f5acd3f
|
[
"MIT"
] | 21 |
2019-10-14T14:33:33.000Z
|
2022-02-11T04:43:07.000Z
|
dbdaora/sorted_set/entity.py
|
dutradda/sqldataclass
|
5c87a3818e9d736bbf5e1438edc5929a2f5acd3f
|
[
"MIT"
] | null | null | null |
dbdaora/sorted_set/entity.py
|
dutradda/sqldataclass
|
5c87a3818e9d736bbf5e1438edc5929a2f5acd3f
|
[
"MIT"
] | 1 |
2019-09-29T23:51:44.000Z
|
2019-09-29T23:51:44.000Z
|
from typing import ( # type: ignore
Any,
Dict,
Optional,
Protocol,
Sequence,
Tuple,
Type,
TypeVar,
TypedDict,
Union,
_TypedDictMeta,
)
from dbdaora.data_sources.memory import RangeOutput
from dbdaora.entity import init_subclass
# Write-side form of a sorted set: a flat sequence of member/score values.
SortedSetInput = Sequence[Union[str, float]]
# Data carried by an entity: either the memory data source's range output
# or the input form above.
SortedSetData = Union[RangeOutput, SortedSetInput]
class SortedSetEntityProtocol(Protocol):
    """Structural type for class-based sorted-set entities."""
    # The sorted-set payload itself.
    data: SortedSetData
    # Optional cap on the number of members kept.
    max_size: Optional[int] = None
    def __init__(
        self,
        *,
        data: SortedSetData,
        max_size: Optional[int] = None,
        **kwargs: Any,
    ):
        ...
class SortedSetEntity(SortedSetEntityProtocol):
    """Concrete base class; subclasses are wired up via init_subclass."""
    data: SortedSetData
    max_size: Optional[int] = None
    def __init_subclass__(cls) -> None:
        # Run dbdaora's entity registration for every subclass.
        init_subclass(cls, (SortedSetEntity,))
class SortedSetDictEntityMeta(_TypedDictMeta):  # type: ignore
    """TypedDict metaclass that also runs dbdaora's entity registration."""
    def __init__(
        cls, name: str, bases: Tuple[Type[Any], ...], attrs: Dict[str, Any]
    ):
        super().__init__(name, bases, attrs)
        init_subclass(cls, bases)
class SortedSetDictEntity(TypedDict, metaclass=SortedSetDictEntityMeta):
    """Dict-shaped sorted-set entity with the same fields as the class form."""
    data: SortedSetData
    max_size: Optional[int]
# Accepted entity shapes for generic repository code: class- or dict-based.
SortedSetEntityHint = TypeVar(
    'SortedSetEntityHint',
    bound=Union[SortedSetEntityProtocol, SortedSetDictEntity],
)
| 21.015873 | 75 | 0.674471 | 3.078125 |
dbf9fd7a0551ac94c5f54d4eae43b36c79d2fcd3
| 28,041 |
php
|
PHP
|
application/models/discount/Discount_rule_model.php
|
sutouch08/wms
|
42d4b3fc459837ecd9d0ca0c32a0043061b3511f
|
[
"MIT"
] | null | null | null |
application/models/discount/Discount_rule_model.php
|
sutouch08/wms
|
42d4b3fc459837ecd9d0ca0c32a0043061b3511f
|
[
"MIT"
] | null | null | null |
application/models/discount/Discount_rule_model.php
|
sutouch08/wms
|
42d4b3fc459837ecd9d0ca0c32a0043061b3511f
|
[
"MIT"
] | null | null | null |
<?php
class Discount_rule_model extends CI_Model
{
  // Standard CodeIgniter model constructor; no extra initialization needed.
  public function __construct()
  {
    parent::__construct();
  }
public function add(array $ds = array())
{
$rs = $this->db->insert('discount_rule', $ds);
if($rs)
{
return $this->db->insert_id();
}
return FALSE;
}
public function update($id, array $ds = array())
{
if(!empty($ds))
{
return $this->db->where('id', $id)->update('discount_rule', $ds);
}
return FALSE;
}
public function get($id)
{
$rs = $this->db->where('id', $id)->get('discount_rule');
if($rs->num_rows() == 1)
{
return $rs->row();
}
return array();
}
public function get_policy_id($id)
{
$rs = $this->db->select('id_policy')->where('id', $id)->get('discount_rule');
if($rs->num_rows() === 1)
{
return $rs->row()->id_policy;
}
return NULL;
}
/*
|----------------------------------
| BEGIN ใช้สำหรับแสดงรายละเอียดในหน้าพิมพ์
|----------------------------------
*/
  // The getters below feed the print view: each returns a raw query result
  // (CI_DB_result) of the codes/names saved for rule $id.
  // NOTE(review): $id is concatenated straight into SQL in every method —
  // assumed to be an already-validated integer; confirm at the call sites.
  public function getCustomerRuleList($id)
  {
    $qr = "SELECT cs.code, cs.name FROM discount_rule_customer AS cr ";
    $qr .= "JOIN customers AS cs ON cr.customer_code = cs.code ";
    $qr .= "WHERE cr.id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getCustomerGroupRule($id)
  {
    $qr = "SELECT cs.code, cs.name FROM discount_rule_customer_group AS cr ";
    $qr .= "JOIN customer_group AS cs ON cr.group_code = cs.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  // NOTE(review): the next four SELECTs list cs.code twice — harmless but
  // probably unintended.
  public function getCustomerTypeRule($id)
  {
    $qr = "SELECT cs.code, cs.code, cs.name FROM discount_rule_customer_type AS cr ";
    $qr .= "JOIN customer_type AS cs ON cr.type_code = cs.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getCustomerKindRule($id)
  {
    $qr = "SELECT cs.code, cs.code, cs.name FROM discount_rule_customer_kind AS cr ";
    $qr .= "JOIN customer_kind AS cs ON cr.kind_code = cs.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getCustomerAreaRule($id)
  {
    $qr = "SELECT cs.code, cs.code, cs.name FROM discount_rule_customer_area AS cr ";
    $qr .= "JOIN customer_area AS cs ON cr.area_code = cs.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getCustomerClassRule($id)
  {
    $qr = "SELECT cs.code, cs.code, cs.name FROM discount_rule_customer_class AS cr ";
    $qr .= "JOIN customer_class AS cs ON cr.class_code = cs.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getProductStyleRule($id)
  {
    $qr = "SELECT ps.code FROM discount_rule_product_style AS sr ";
    $qr .= "JOIN product_style AS ps ON sr.style_code = ps.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getProductGroupRule($id)
  {
    $qr = "SELECT ps.code, ps.name FROM discount_rule_product_group AS sr ";
    $qr .= "JOIN product_group AS ps ON sr.group_code = ps.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getProductSubGroupRule($id)
  {
    $qr = "SELECT ps.code, ps.name FROM discount_rule_product_sub_group AS sr ";
    $qr .= "JOIN product_sub_group AS ps ON sr.sub_group_code = ps.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getProductTypeRule($id)
  {
    $qr = "SELECT ps.code, ps.name FROM discount_rule_product_type AS sr ";
    $qr .= "JOIN product_type AS ps ON sr.type_code = ps.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getProductKindRule($id)
  {
    $qr = "SELECT ps.code, ps.name FROM discount_rule_product_kind AS sr ";
    $qr .= "JOIN product_kind AS ps ON sr.kind_code = ps.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getProductCategoryRule($id)
  {
    $qr = "SELECT ps.code, ps.name FROM discount_rule_product_category AS sr ";
    $qr .= "JOIN product_category AS ps ON sr.category_code = ps.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getProductBrandRule($id)
  {
    $qr = "SELECT ps.code, ps.name FROM discount_rule_product_brand AS sr ";
    $qr .= "JOIN product_brand AS ps ON sr.brand_code = ps.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  // Years are stored directly on the rule table; no join needed.
  public function getProductYearRule($id)
  {
    $qr = "SELECT year FROM discount_rule_product_year WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getChannelsRule($id)
  {
    $qr = "SELECT cn.name FROM discount_rule_channels AS cr ";
    $qr .= "JOIN channels AS cn ON cr.channels_code = cn.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
  public function getPaymentRule($id)
  {
    $qr = "SELECT cn.name FROM discount_rule_payment AS cr ";
    $qr .= "JOIN payment_method AS cn ON cr.payment_code = cn.code ";
    $qr .= "WHERE id_rule = ".$id;
    return $this->db->query($qr);
  }
/*
|----------------------------------
| END ใช้สำหรับแสดงรายละเอียดในหน้าพิมพ์
|----------------------------------
*/
/*
|----------------------------------
| BEGIN ใช้สำหรับหน้ากำหนดเงื่อนไข
|----------------------------------
*/
  // The getRule* readers below return the saved condition codes for rule $id
  // as an associative "set" (code => code), so the edit form can test
  // membership with isset() in O(1). An empty array means no condition saved.
  public function getRuleCustomerId($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_customer');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->customer_code] = $rd->customer_code;
      }
    }
    return $sc;
  }
  public function getRuleCustomerGroup($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_customer_group');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->group_code] = $rd->group_code;
      }
    }
    return $sc;
  }
  public function getRuleCustomerType($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_customer_type')
;
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->type_code] = $rd->type_code;
      }
    }
    return $sc;
  }
  public function getRuleCustomerKind($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_customer_kind');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->kind_code] = $rd->kind_code;
      }
    }
    return $sc;
  }
  public function getRuleCustomerArea($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_customer_area');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->area_code] = $rd->area_code;
      }
    }
    return $sc;
  }
  public function getRuleCustomerClass($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_customer_class');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->class_code] = $rd->class_code;
      }
    }
    return $sc;
  }
  public function getRuleProductStyle($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_product_style');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->style_code] = $rd->style_code;
      }
    }
    return $sc;
  }
  public function getRuleProductGroup($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_product_group');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->group_code] = $rd->group_code;
      }
    }
    return $sc;
  }
  public function getRuleProductSubGroup($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_product_sub_group');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->sub_group_code] = $rd->sub_group_code;
      }
    }
    return $sc;
  }
  public function getRuleProductType($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_product_type');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->type_code] = $rd->type_code;
      }
    }
    return $sc;
  }
  public function getRuleProductKind($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_product_kind');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->kind_code] = $rd->kind_code;
      }
    }
    return $sc;
  }
  public function getRuleProductCategory($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_product_category');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->category_code] = $rd->category_code;
      }
    }
    return $sc;
  }
  // Year conditions key the set by the literal year value.
  public function getRuleProductYear($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_product_year');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->year] = $rd->year;
      }
    }
    return $sc;
  }
  public function getRuleProductBrand($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_product_brand');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->brand_code] = $rd->brand_code;
      }
    }
    return $sc;
  }
  public function getRuleChannels($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_channels');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->channels_code] = $rd->channels_code;
      }
    }
    return $sc;
  }
  public function getRulePayment($id)
  {
    $sc = array();
    $rs = $this->db->where('id_rule', $id)->get('discount_rule_payment');
    if($rs->num_rows() > 0)
    {
      foreach($rs->result() as $rd)
      {
        $sc[$rd->payment_code] = $rd->payment_code;
      }
    }
    return $sc;
  }
  /**
   * Toggle the "applies to every customer" flag on a rule.
   * When enabling ($value === 1) all explicit customer conditions are
   * wiped in one transaction; when disabling, only the flag is cleared
   * and any existing conditions are left untouched.
   *
   * @param int $id    rule id
   * @param int $value 1 to enable, anything else to disable (strict compare)
   * @return stdClass  {status: bool, message: string}
   */
  public function set_all_customer($id, $value)
  {
    /*
    1. set all customer = 1
    2. delete customer rule
    3. delete customer_group rule;
    4. delete customer_type rule;
    5. delete customer_kind rule;
    6. delete customer_area rule;
    7. delete customer_class rule;
    */
    $result = new stdClass();
    $result->status = TRUE;
    $result->message = 'success';
    if($value === 1)
    {
      //--- start transection
      $this->db->trans_start();
      //--- 1
      $this->db->query("UPDATE discount_rule SET all_customer = 1 WHERE id = $id");
      //--- 2
      $this->db->query("DELETE FROM discount_rule_customer WHERE id_rule = $id");
      //--- 3
      $this->db->query("DELETE FROM discount_rule_customer_group WHERE id_rule = $id");
      //--- 4
      $this->db->query("DELETE FROM discount_rule_customer_type WHERE id_rule = $id");
      //--- 5
      $this->db->query("DELETE FROM discount_rule_customer_kind WHERE id_rule = $id");
      //--- 6
      $this->db->query("DELETE FROM discount_rule_customer_area WHERE id_rule = $id");
      //--- 7
      $this->db->query("DELETE FROM discount_rule_customer_class WHERE id_rule = $id");
      //--- end transection
      $this->db->trans_complete();
      if($this->db->trans_status() === FALSE)
      {
        $result->status = FALSE;
        $result->message = 'กำหนดลูกค้าทั้งหมดไม่สำเร็จ';
      }
    }
    else
    {
      $rs = $this->db->query("UPDATE discount_rule SET all_customer = 0 WHERE id = $id");
      if($rs === FALSE)
      {
        $result->status = FALSE;
        $result->message = 'กำหนดลูกค้าทั้งหมดไม่สำเร็จ';
      }
    }
    return $result;
  }
  /**
   * Replace the rule's explicit customer list. Clears the all-customer
   * flag, rewrites discount_rule_customer from $cust_list, and removes
   * every attribute-based customer condition — all in one transaction,
   * so a customer list and attribute conditions never coexist.
   *
   * @param int   $id        rule id
   * @param array $cust_list customer codes to attach (may be empty)
   * @return stdClass        {status: bool, message: string}
   */
  public function set_customer_list($id, $cust_list)
  {
    /*
    1. set all customer = 0
    2. delete customers rule;
    2.1 set customer rule;
    3. delete customer_group rule;
    4. delete customer_type rule;
    5. delete customer_kind rule;
    6. delete customer_area rule;
    7. delete customer_class rule;
    */
    $result = new stdClass();
    $result->status = TRUE;
    $result->message = 'success';
    //---- start transection
    $this->db->trans_start();
    //--- 1.
    $this->db->query("UPDATE discount_rule SET all_customer = 0 WHERE id = $id");
    //--- 2.
    $this->db->query("DELETE FROM discount_rule_customer WHERE id_rule = $id");
    if(!empty($cust_list))
    {
      foreach($cust_list as $code)
      {
        $this->db->query("INSERT INTO discount_rule_customer (id_rule, customer_code) VALUES ($id, '$code')");
      }
    }
    //--- 3
    $this->db->query("DELETE FROM discount_rule_customer_group WHERE id_rule = $id");
    //--- 4
    $this->db->query("DELETE FROM discount_rule_customer_type WHERE id_rule = $id");
    //--- 5
    $this->db->query("DELETE FROM discount_rule_customer_kind WHERE id_rule = $id");
    //--- 6
    $this->db->query("DELETE FROM discount_rule_customer_area WHERE id_rule = $id")
;
    //--- 7
    $this->db->query("DELETE FROM discount_rule_customer_class WHERE id_rule = $id");
    //--- end transection
    $this->db->trans_complete();
    if($this->db->trans_status() === FALSE)
    {
      $result->status = FALSE;
      $result->message = 'กำหนดรายชื่อลูกค้าไม่สำเร็จ';
    }
    return $result;
  }
public function set_customer_attr($id, $group, $type, $kind, $area, $class)
{
$result = new stdClass();
$result->status = TRUE;
$result->message = 'message';
//--- start transection
$this->db->trans_start();
//--- 1.
$this->db->query("DELETE FROM discount_rule_customer WHERE id_rule = $id");
//--- 2
$this->db->query("DELETE FROM discount_rule_customer_group WHERE id_rule = $id");
if(!empty($group))
{
foreach($group as $code)
{
$this->db->query("INSERT INTO discount_rule_customer_group (id_rule, group_code) VALUES ($id, '$code')");
}
}
//--- 3
$this->db->query("DELETE FROM discount_rule_customer_type WHERE id_rule = $id");
if(!empty($type))
{
foreach($type as $code)
{
$this->db->query("INSERT INTO discount_rule_customer_type (id_rule, type_code) VALUES ($id, '$code')");
}
}
//--- 4
$this->db->query("DELETE FROM discount_rule_customer_kind WHERE id_rule = $id");
if(!empty($kind))
{
foreach($kind as $code)
{
$this->db->query("INSERT INTO discount_rule_customer_kind (id_rule, kind_code) VALUES ($id, '$code')");
}
}
//--- 5
$this->db->query("DELETE FROM discount_rule_customer_area WHERE id_rule = $id");
if(!empty($area))
{
foreach($area as $code)
{
$this->db->query("INSERT INTO discount_rule_customer_area (id_rule, area_code) VALUES ($id, '$code')");
}
}
//--- 6
$this->db->query("DELETE FROM discount_rule_customer_class WHERE id_rule = $id");
if(!empty($class))
{
foreach($class as $code)
{
$this->db->query("INSERT INTO discount_rule_customer_class (id_rule, class_code) VALUES ($id, '$code')");
}
}
//--- end transection
$this->db->trans_complete();
if($this->db->trans_status() === FALSE)
{
$result->status = FALSE;
$result->message = 'กำหนดเงื่อนไขคุณลักษณะลูกค้าไม่สำเร็จ';
}
return $result;
}
  /**
   * Toggle the "applies to every product" flag on a rule. Enabling wipes
   * all product conditions in one transaction; disabling only clears the
   * flag and keeps existing conditions.
   *
   * NOTE(review): uses loose `$value == 1` while set_all_customer uses
   * strict `=== 1` — so '1' (string) enables here but not there; confirm
   * which the controllers actually send.
   *
   * @param int $id    rule id
   * @param int $value 1 to enable, anything else to disable
   * @return stdClass  {status: bool, message: string}
   */
  public function set_all_product($id, $value)
  {
    $result = new stdClass();
    $result->status = TRUE;
    $result->message = 'success';
    if($value == 1)
    {
      //--- start transection
      $this->db->trans_start();
      //--- 1.
      $this->db->query("UPDATE discount_rule SET all_product = 1 WHERE id = $id");
      //--- 2.
      $this->db->query("DELETE FROM discount_rule_product_style WHERE id_rule = $id");
      //--- 3
      $this->db->query("DELETE FROM discount_rule_product_group WHERE id_rule = $id");
      //--- 4
      $this->db->query("DELETE FROM discount_rule_product_sub_group WHERE id_rule = $id");
      //--- 5
      $this->db->query("DELETE FROM discount_rule_product_category WHERE id_rule = $id");
      //--- 6
      $this->db->query("DELETE FROM discount_rule_product_type WHERE id_rule = $id");
      //--- 7
      $this->db->query("DELETE FROM discount_rule_product_kind WHERE id_rule = $id");
      //--- 8
      $this->db->query("DELETE FROM discount_rule_product_brand WHERE id_rule = $id");
      //--- 9
      $this->db->query("DELETE FROM discount_rule_product_year WHERE id_rule = $id");
      //--- end transection
      $this->db->trans_complete();
      if($this->db->trans_status() === FALSE)
      {
        $result->status = FALSE;
        $result->message = "บันทึกเงือนไขสินค้าไม่สำเร็จ";
      }
    }
    else
    {
      //--- 1. clear the flag only; no status check on this single UPDATE
      $this->db->query("UPDATE discount_rule SET all_product = 0 WHERE id = $id");
    }
    return $result;
  }
  /**
   * Replace the rule's explicit product-style list. Clears the
   * all-product flag, rewrites discount_rule_product_style from $style,
   * and removes every attribute-based product condition — all in one
   * transaction, so a style list and attribute conditions never coexist.
   *
   * @param int   $id    rule id
   * @param array $style product style codes to attach (may be empty)
   * @return stdClass    {status: bool, message: string}
   */
  public function set_product_style($id, $style)
  {
    $result = new stdClass();
    $result->status = TRUE;
    $result->message = 'success';
    //---- start transection
    $this->db->trans_start();
    //--- 1.
    $this->db->query("UPDATE discount_rule SET all_product = 0 WHERE id = $id");
    //--- 2 remove the old style conditions first
    $this->db->query("DELETE FROM discount_rule_product_style WHERE id_rule = $id");
    if(!empty($style))
    {
      foreach($style as $code)
      {
        $this->db->query("INSERT INTO discount_rule_product_style (id_rule, style_code) VALUES ($id, '$code')");
      }
    }
    //--- 3
    $this->db->query("DELETE FROM discount_rule_product_group WHERE id_rule = $id");
    //--- 4
    $this->db->query("DELETE FROM discount_rule_product_sub_group WHERE id_rule = $id");
    //--- 5
    $this->db->query("DELETE FROM discount_rule_product_category WHERE id_rule = $id");
    //--- 6
    $this->db->query("DELETE FROM discount_rule_product_type WHERE id_rule = $id");
    //--- 7
    $this->db->query("DELETE FROM discount_rule_product_kind WHERE id_rule = $id");
    //--- 8
    $this->db->query("DELETE FROM discount_rule_product_brand WHERE id_rule = $id");
    //--- 9
    $this->db->query("DELETE FROM discount_rule_product_year WHERE id_rule = $id");
    //--- end transection
    $this->db->trans_complete();
    if($this->db->trans_status() === FALSE)
    {
      $result->status = FALSE;
      $result->message = 'กำหนดเงื่อนไขรุ่นสินค้าไม่สำเร็จ';
    }
    return $result;
  }
  /**
   * Replace the rule's attribute-based product conditions (group,
   * sub-group, category, type, kind, brand, year). Clears the explicit
   * style list first, then rewrites each attribute table inside one
   * transaction using the query builder (values are escaped by CI here,
   * unlike the string-built setters above).
   *
   * @param int   $id        rule id
   * @param array $group     product group codes
   * @param array $sub_group product sub-group codes
   * @param array $category  product category codes
   * @param array $type      product type codes
   * @param array $kind      product kind codes
   * @param array $brand     product brand codes
   * @param array $year      product years
   * @return stdClass        {status: bool, message: string}
   */
  public function set_product_attr($id, $group, $sub_group, $category, $type, $kind, $brand, $year)
  {
    $result = new stdClass();
    $result->status = TRUE;
    $result->message = 'success';
    $this->db->trans_start();
    //--- remove the product-style conditions first
    $this->db->where('id_rule', $id)->delete('discount_rule_product_style');
    //--- product group
    $this->db->where('id_rule', $id)->delete('discount_rule_product_group');
    if(!empty($group))
    {
      foreach($group as $code)
      {
        $this->db->insert('discount_rule_product_group', array('id_rule' => $id, 'group_code' => $code));
      }
    }
    //--- product sub-group
    $this->db->where('id_rule', $id)->delete('discount_rule_product_sub_group');
    if(!empty($sub_group))
    {
      foreach($sub_group as $code)
      {
        $this->db->insert('discount_rule_product_sub_group', array('id_rule' => $id, 'sub_group_code' => $code));
      }
    }
    //--- product category
    $this->db->where('id_rule', $id)->delete('discount_rule_product_category');
    if(!empty($category))
    {
      foreach($category as $code)
      {
        $this->db->insert('discount_rule_product_category', array('id_rule' => $id, 'category_code' => $code));
      }
    }
    //--- product type
    $this->db->where('id_rule', $id)->delete('discount_rule_product_type');
    if(!empty($type))
    {
      foreach($type as $code)
      {
        $this->db->insert('discount_rule_product_type', array('id_rule' => $id, 'type_code' => $code));
      }
    }
    //--- product kind
    $this->db->where('id_rule', $id)->delete('discount_rule_product_kind');
    if(!empty($kind))
    {
      foreach($kind as $code)
      {
        $this->db->insert('discount_rule_product_kind', array('id_rule' => $id, 'kind_code' => $code));
      }
    }
    //--- brand
    $this->db->where('id_rule', $id)->delete('discount_rule_product_brand');
    if(!empty($brand))
    {
      foreach($brand as $code)
      {
        $this->db->insert('discount_rule_product_brand', array('id_rule' => $id, 'brand_code' => $code));
      }
    }
    //--- product year
    $this->db->where('id_rule', $id)->delete('discount_rule_product_year');
    if(!empty($year))
    {
      foreach($year as $code)
      {
        $this->db->insert('discount_rule_product_year', array('id_rule' => $id, 'year' => $code));
      }
    }
    //--- end transection
    $this->db->trans_complete();
    if($this->db->trans_status() === FALSE)
    {
      $result->status = FALSE;
      $result->message = 'กำหนดเงื่อนไขคุณลักษณะสินค้าไม่สำเร็จ';
    }
    return $result;
  }
/**
 * Make a discount rule apply to every sales channel.
 *
 * Deletes any per-channel conditions and sets the rule's all_channels flag,
 * inside a single transaction.
 *
 * @param int $id rule id
 * @return stdClass {status: bool, message: string}
 */
public function set_all_channels($id)
{
    $this->db->trans_start();
    // Per-channel conditions are meaningless once the rule covers all channels.
    $this->db->where('id_rule', $id)->delete('discount_rule_channels');
    $this->db->set('all_channels', 1)->where('id', $id)->update('discount_rule');
    $this->db->trans_complete();

    $result = new stdClass();
    if ($this->db->trans_status() === FALSE) {
        $result->status = FALSE;
        $result->message = 'กำหนดเงื่อนไขช่องทางขายไม่สำเร็จ';
    } else {
        $result->status = TRUE;
        $result->message = 'success';
    }
    return $result;
}
/**
 * Restrict a discount rule to a specific set of sales channels.
 *
 * Clears the all_channels flag, replaces the rule's channel list with the
 * submitted codes, all in one transaction.
 *
 * @param int        $id       rule id
 * @param array|null $channels channel codes the rule applies to
 * @return stdClass {status: bool, message: string}
 */
public function set_channels($id, $channels)
{
    $this->db->trans_start();
    // The rule no longer applies to every channel.
    $this->db->set('all_channels', 0)->where('id', $id)->update('discount_rule');
    // Replace the previous channel list with the submitted one.
    $this->db->where('id_rule', $id)->delete('discount_rule_channels');
    if (!empty($channels)) {
        foreach ($channels as $code) {
            $this->db->insert('discount_rule_channels', array('id_rule' => $id, 'channels_code' => $code));
        }
    }
    $this->db->trans_complete();

    $result = new stdClass();
    if ($this->db->trans_status() === FALSE) {
        $result->status = FALSE;
        $result->message = 'กำหนดเงื่อนไขช่องทางขายไม่สำเร็จ';
    } else {
        $result->status = TRUE;
        $result->message = 'success';
    }
    return $result;
}
/**
 * Make a discount rule apply to every payment method.
 *
 * Deletes any per-method conditions and sets the rule's all_payment flag,
 * inside a single transaction.
 *
 * @param int $id rule id
 * @return stdClass {status: bool, message: string}
 */
public function set_all_payment($id)
{
    $this->db->trans_start();
    // Per-method conditions are meaningless once the rule covers all methods.
    $this->db->where('id_rule', $id)->delete('discount_rule_payment');
    $this->db->set('all_payment', 1)->where('id', $id)->update('discount_rule');
    $this->db->trans_complete();

    $result = new stdClass();
    if ($this->db->trans_status() === FALSE) {
        $result->status = FALSE;
        $result->message = 'กำหนดเงื่อนไขช่องการชำระเงินไม่สำเร็จ';
    } else {
        $result->status = TRUE;
        $result->message = 'success';
    }
    return $result;
}
/**
 * Restrict a discount rule to specific payment methods.
 *
 * Clears the all_payment flag, replaces the rule's payment-method list with
 * the submitted codes, all in one transaction.
 *
 * @param int        $id      rule id
 * @param array|null $payment payment method codes the rule applies to
 * @return stdClass {status: bool, message: string}
 */
public function set_payment($id, $payment)
{
    $this->db->trans_start();
    // The rule no longer applies to every payment method.
    $this->db->set('all_payment', 0)->where('id', $id)->update('discount_rule');
    // Replace the previous payment-method list with the submitted one.
    $this->db->where('id_rule', $id)->delete('discount_rule_payment');
    if (!empty($payment)) {
        foreach ($payment as $code) {
            $this->db->insert('discount_rule_payment', array('id_rule' => $id, 'payment_code' => $code));
        }
    }
    $this->db->trans_complete();

    $result = new stdClass();
    if ($this->db->trans_status() === FALSE) {
        $result->status = FALSE;
        $result->message = 'กำหนดเงื่อนไขช่องทางการชำระเงินไม่สำเร็จ';
    } else {
        $result->status = TRUE;
        $result->message = 'success';
    }
    return $result;
}
/*
|----------------------------------
| END ใช้สำหรับหน้ากำหนดเงื่อนไข
|----------------------------------
*/
/**
 * Attach a discount rule to a policy.
 *
 * @param int $id_rule   rule id to update
 * @param int $id_policy policy id to assign
 * @return bool TRUE on success (CodeIgniter update() result)
 */
public function update_policy($id_rule, $id_policy)
{
    $this->db->set('id_policy', $id_policy);
    $this->db->where('id', $id_rule);
    return $this->db->update('discount_rule');
}
/**
 * Count non-deleted discount rules matching the given filters.
 *
 * User-supplied strings are escaped before being embedded in the query
 * (the previous version concatenated them raw — SQL injectable).
 *
 * @param string     $code     partial rule code ('' = no filter)
 * @param string     $name     partial rule name ('' = no filter)
 * @param int        $active   0/1 to filter, 2 = no filter
 * @param string     $policy   policy filter, expanded via discount_policy_in()
 * @param string|int $discount item_disc filter ('' = no filter)
 * @return int number of matching rules
 */
public function count_rows($code, $name, $active, $policy, $discount)
{
    $qr = "SELECT id FROM discount_rule WHERE isDeleted = 0 ";
    if($code != "")
    {
        // escape_like_str() neutralises quotes and LIKE wildcards
        $qr .= "AND code LIKE '%".$this->db->escape_like_str($code)."%' ";
    }
    if($name != "")
    {
        $qr .= "AND name LIKE '%".$this->db->escape_like_str($name)."%' ";
    }
    if($active != 2)
    {
        $qr .= "AND active = ".(int)$active." ";
    }
    if($policy != "")
    {
        $policies = discount_policy_in($policy);
        $qr .= "AND id_policy IN(".$policies.") ";
    }
    if($discount != "")
    {
        $qr .= "AND item_disc = ".(int)$discount." ";
    }
    // num_rows() already returns 0 when nothing matched
    return $this->db->query($qr)->num_rows();
}
/**
 * Fetch non-deleted discount rules matching the given filters.
 *
 * Fixes two defects of the previous version: user input is escaped before
 * being embedded in the query (was SQL injectable), and $perpage/$offset are
 * now actually applied as LIMIT/OFFSET (they were accepted but ignored).
 *
 * @param string     $code     partial rule code ('' = no filter)
 * @param string     $name     partial rule name ('' = no filter)
 * @param int        $active   0/1 to filter, 2 = no filter
 * @param string     $policy   policy filter, expanded via discount_policy_in()
 * @param string|int $discount item_disc filter ('' = no filter)
 * @param string|int $perpage  page size ('' = unlimited, previous behavior)
 * @param string|int $offset   row offset ('' = none)
 * @return array result rows, or empty array
 */
public function get_data($code, $name, $active, $policy, $discount, $perpage = '', $offset = '')
{
    $qr = "SELECT * FROM discount_rule WHERE isDeleted = 0 ";
    if($code != "")
    {
        $qr .= "AND code LIKE '%".$this->db->escape_like_str($code)."%' ";
    }
    if($name != "")
    {
        $qr .= "AND name LIKE '%".$this->db->escape_like_str($name)."%' ";
    }
    if($active != 2)
    {
        $qr .= "AND active = ".(int)$active." ";
    }
    if($policy != "")
    {
        $policies = discount_policy_in($policy);
        $qr .= "AND id_policy IN(".$policies.") ";
    }
    if($discount != "")
    {
        $qr .= "AND item_disc = ".(int)$discount." ";
    }
    $qr .= "ORDER BY code DESC";
    //--- pagination: only applied when a page size is supplied
    if($perpage !== '')
    {
        $qr .= " LIMIT ".(int)$perpage;
        if($offset !== '')
        {
            $qr .= " OFFSET ".(int)$offset;
        }
    }
    $rs = $this->db->query($qr);
    if($rs->num_rows() > 0)
    {
        return $rs->result();
    }
    return array();
}
/**
 * Fetch every discount rule attached to a policy.
 *
 * @param int $id_policy policy id
 * @return array result rows, or empty array when none
 */
public function get_policy_rules($id_policy)
{
    $rs = $this->db->where('id_policy', $id_policy)->get('discount_rule');
    return ($rs->num_rows() > 0) ? $rs->result() : array();
}
/**
 * Fetch every active discount rule that is not attached to any policy.
 *
 * @return array result rows, or empty array when none
 */
public function get_active_rule()
{
    $rs = $this->db
        ->where('active', 1)
        ->where('id_policy IS NULL')
        ->get('discount_rule');
    return ($rs->num_rows() > 0) ? $rs->result() : array();
}
/**
 * Return the highest existing rule code starting with the given prefix.
 *
 * The prefix is escaped before being embedded (was SQL injectable) and the
 * pointless ORDER BY on a single-row MAX() aggregate was dropped. When no
 * rule matches, MAX() yields a NULL code.
 *
 * @param string $code code prefix to match
 * @return string|null highest matching code, or NULL when none exists
 */
public function get_max_code($code)
{
    $qr = "SELECT MAX(code) AS code FROM discount_rule WHERE code LIKE '".$this->db->escape_like_str($code)."%'";
    $row = $this->db->query($qr)->row();
    return ($row === NULL) ? NULL : $row->code;
}
/**
 * Search rules whose code OR name contains the text.
 *
 * NOTE(review): the previous version chained two like() calls, which
 * CodeIgniter combines with AND — a row had to match the same text in BOTH
 * code and name, so the search almost never returned anything. or_like()
 * restores the presumably intended code-OR-name semantics.
 *
 * @param string $txt search text
 * @return array rows containing only the id column, or empty array
 */
public function search($txt)
{
    $rs = $this->db->select('id')
                   ->like('code', $txt)
                   ->or_like('name', $txt)
                   ->get('discount_rule');
    if($rs->num_rows() > 0)
    {
        return $rs->result();
    }
    return array();
}
/**
 * Permanently delete a rule and all of its condition rows.
 *
 * All deletes run in one transaction; condition tables are cleared first,
 * then the rule row itself.
 *
 * @param int $id rule id
 * @return bool transaction status
 */
public function delete_rule($id)
{
    //--- start transaction
    $this->db->trans_start();
    $condition_tables = array(
        'discount_rule_product_style',
        'discount_rule_product_group',
        'discount_rule_product_sub_group',
        'discount_rule_product_category',
        'discount_rule_product_type',
        'discount_rule_product_kind',
        'discount_rule_product_brand',
        'discount_rule_product_year',
    );
    foreach ($condition_tables as $table) {
        $this->db->where('id_rule', $id)->delete($table);
    }
    // finally remove the rule row itself
    $this->db->where('id', $id)->delete('discount_rule');
    //--- end transaction
    $this->db->trans_complete();
    return $this->db->trans_status();
}
} //--- end class
?>
| 22.450761 | 113 | 0.559181 | 3.1875 |
7b2e60f67cfef4a8eb88c7558129e2f2a5cb827c
| 1,065 |
rb
|
Ruby
|
app/models/flogiston/layout.rb
|
flogic/flogiston-cms
|
bee42188bf63e50974f152ee82c14634fdec7ba2
|
[
"MIT"
] | 1 |
2016-05-09T06:02:59.000Z
|
2016-05-09T06:02:59.000Z
|
app/models/flogiston/layout.rb
|
flogic/flogiston-cms
|
bee42188bf63e50974f152ee82c14634fdec7ba2
|
[
"MIT"
] | null | null | null |
app/models/flogiston/layout.rb
|
flogic/flogiston-cms
|
bee42188bf63e50974f152ee82c14634fdec7ba2
|
[
"MIT"
] | null | null | null |
# Page layout model. Instances also masquerade as ActionView layouts and
# templates so stored layout markup can be rendered through Rails' normal
# rendering pipeline (see the "fakery" sections below).
class Flogiston::Layout < Flogiston::AbstractPage
  validates_uniqueness_of :handle
  validates_presence_of :handle

  # The layout currently flagged as the site default, or nil when none is.
  def self.default
    first(:conditions => { :default => true })
  end

  # Flag this layout as the default, clearing the flag on every other row.
  def make_default!
    Layout.update_all({ :default => false }, "id <> #{self.id}")
    update_attributes!(:default => true)
  end

  # Stored contents with the given placeholder replacements expanded.
  def full_contents(replacements = {})
    self.class.expand(contents, replacements)
  end

  ### for ActionView Layout fakery
  # Identifier ActionView uses to refer to this "template" (e.g. in errors).
  def path_without_format_and_extension
    "<Layout '#{handle}'>"
  end

  # Render the stored contents by building a throwaway ActionView::Template
  # and overriding its source/recompile?/extension singleton methods via
  # instance_eval on a generated code string.
  def render_template(view, local_assigns = {})
    renderer = ActionView::Template.new('')
    renderer.instance_eval <<-eval_string
      def source
        #{full_contents.inspect}
      end
      def recompile?
        true
      end
      def extension
        #{format.inspect}
      end
    eval_string
    renderer.render_template(view, local_assigns)
  end

  # Reload persisted attributes; no-op for unsaved records. Returns self.
  def refresh
    reload unless new_record?
    self
  end
  ###

  ### for ActionView Template (view) fakery
  # This "view" should still be wrapped by a layout when rendered.
  def exempt_from_layout?
    false
  end
  ###
end
a14d03c58320380dbeef43480fca5e9da7898633
| 2,201 |
ts
|
TypeScript
|
src/visualizations/symbol.ts
|
sgratzl/cytoscape.js-overlays
|
4b0c36a6f10b62885e66d53a803e328353232c44
|
[
"MIT"
] | 10 |
2020-07-20T15:50:07.000Z
|
2022-01-03T10:48:08.000Z
|
src/visualizations/symbol.ts
|
sgratzl/cytoscape.js-overlays
|
4b0c36a6f10b62885e66d53a803e328353232c44
|
[
"MIT"
] | 2 |
2021-03-30T07:51:31.000Z
|
2021-05-26T20:25:23.000Z
|
src/visualizations/symbol.ts
|
sgratzl/cytoscape.js-overlays
|
4b0c36a6f10b62885e66d53a803e328353232c44
|
[
"MIT"
] | 1 |
2021-09-24T22:21:43.000Z
|
2021-09-24T22:21:43.000Z
|
import {
symbolCircle,
symbolCross,
symbolDiamond,
symbolSquare,
symbolStar,
symbolTriangle,
SymbolType,
symbolWye,
} from 'd3-shape';
import { INodeFunction, IVisualization } from './interfaces';
import { resolveFunction } from './utils';
// Lookup table mapping the supported symbol names to their d3 symbol types.
const symbols = {
  circle: symbolCircle,
  cross: symbolCross,
  diamond: symbolDiamond,
  square: symbolSquare,
  star: symbolStar,
  triangle: symbolTriangle,
  wye: symbolWye,
};
/** A text glyph (e.g. a character or emoji) drawn as the node symbol. */
export interface ITextSymbol {
  /** The text to draw. */
  text: string;
  /** Optional canvas font specification, e.g. `"16px sans-serif"`. */
  font?: string;
}
/** Options for the symbol visualization. */
export interface ISymbolOptions {
  /**
   * Per-node symbol: a named d3 symbol, an image source, a d3 SymbolType,
   * or a text glyph. `null` disables rendering for that node.
   */
  symbol: INodeFunction<keyof typeof symbols | CanvasImageSource | SymbolType | ITextSymbol | null>;
  /** Per-node fill color; `null` disables rendering for that node. */
  color: INodeFunction<string | null>;
}
/**
 * Type guard: true when `s` looks like a d3 {@link SymbolType}
 * (an object exposing a `draw` function).
 *
 * Takes `unknown` instead of `any` and tolerates `null`/`undefined`
 * (the old version would throw when given a nullish value).
 */
function isSymbol(s: unknown): s is SymbolType {
  return s != null && typeof (s as SymbolType).draw === 'function';
}
/**
 * Type guard: true when `s` is a text glyph ({@link ITextSymbol},
 * i.e. an object with a string `text` property).
 *
 * Takes `unknown` instead of `any` and tolerates `null`/`undefined`
 * (the old version would throw when given a nullish value).
 */
function isTextSymbol(s: unknown): s is ITextSymbol {
  return s != null && typeof (s as ITextSymbol).text === 'string';
}
/**
 * Create a visualization that draws a single symbol per node.
 *
 * The symbol may be a named d3 symbol, a raw d3 SymbolType, an image
 * source, or a text glyph; `symbol`/`color` resolving to `null` for a
 * node skips rendering for that node.
 *
 * @param options symbol and color accessors (defaults: circle, #cccccc)
 * @returns a visualization callback with an 8x8 top-left default placement
 */
export function renderSymbol(options: Partial<ISymbolOptions> = {}): IVisualization {
  const o = Object.assign(
    {
      symbol: 'circle',
      color: '#cccccc',
    } as ISymbolOptions,
    options
  );
  const symbol = resolveFunction(o.symbol);
  const backgroundColor = resolveFunction(o.color);
  const r: IVisualization = (ctx, node, dim) => {
    const bg = backgroundColor(node);
    const s = symbol(node);
    // null color or symbol disables rendering for this node
    if (bg == null || s == null) {
      return;
    }
    ctx.fillStyle = bg;
    if (isSymbol(s) || typeof s === 'string') {
      // unknown symbol names fall back to a circle
      const sym = isSymbol(s) ? s : symbols[s as keyof typeof symbols] || symbolCircle;
      // d3 symbols draw centered on the origin: shift to the cell center,
      // draw, then shift back (translate is undone manually, not via
      // save/restore).
      ctx.translate(dim.width / 2, dim.height / 2);
      ctx.beginPath();
      // d3 sizes symbols by area, hence width*height
      sym.draw(ctx, 0.5 * (dim.width * dim.height));
      ctx.fill();
      ctx.translate(-dim.width / 2, -dim.height / 2);
    } else if (isTextSymbol(s)) {
      ctx.save();
      if (s.font) {
        ctx.font = s.font;
      }
      ctx.textAlign = 'center';
      ctx.textBaseline = 'middle';
      ctx.fillText(s.text, dim.width / 2, dim.height / 2);
      ctx.restore();
    } else {
      // image source
      ctx.drawImage(s as CanvasImageSource, 0, 0, dim.width, dim.height);
    }
  };
  r.defaultHeight = 8;
  r.defaultWidth = 8;
  r.defaultPosition = 'top-left';
  return r;
}
| 25.011364 | 100 | 0.636075 | 3.03125 |
21ce7130a6038b168dee6d9536818a7d0273af34
| 1,837 |
js
|
JavaScript
|
public/index.js
|
SaiSrichandra/Link-Previwer
|
64468c7490f5102705270e9442e7d9a0da03a2d4
|
[
"MIT"
] | null | null | null |
public/index.js
|
SaiSrichandra/Link-Previwer
|
64468c7490f5102705270e9442e7d9a0da03a2d4
|
[
"MIT"
] | null | null | null |
public/index.js
|
SaiSrichandra/Link-Previwer
|
64468c7490f5102705270e9442e7d9a0da03a2d4
|
[
"MIT"
] | null | null | null |
/**
 * Form-submit handler: normalize the URL from the text box, POST it to the
 * local /scrape endpoint, and render the returned metadata as a preview card.
 *
 * Fixes from the previous version: `url`, `resp` and `jsonData` were implicit
 * globals; an empty text box or a failed fetch crashed on `resp.json()` /
 * `renderCard(undefined)` — both now return early instead.
 */
const getInfo = async (e) => {
    e.preventDefault();
    let url = textBox.value;
    if (!url) {
        return; // nothing typed — keep the current card
    }
    // The scraper needs an absolute URL; default to http:// when no scheme given.
    if (!url.startsWith("http")) {
        url = "http://" + url;
    }
    let resp;
    try {
        resp = await fetch('http://localhost:3000/scrape', {
            method: "POST",
            body: JSON.stringify({ url }),
            headers: {
                'Accept': 'application/json',
                'Content-Type': 'application/json'
            }
        });
    } catch (error) {
        console.error("unsuccessful post");
        return; // previous version fell through and crashed on resp.json()
    }
    const jsonData = await resp.json();
    renderCard(jsonData);
}
/** Restore the preview card to its placeholder state. */
const resetCard = () => {
    const PLACEHOLDER_IMG = "https://i.stack.imgur.com/y9DpT.jpg";
    const PLACEHOLDER_TITLE = "Link Previewer";
    cardImg.src = PLACEHOLDER_IMG;
    cardTitle.textContent = PLACEHOLDER_TITLE;
    cardUrl.textContent = PLACEHOLDER_TITLE;
    cardUrl.href = "#";
    cardDesc.textContent = "Example ( Previews info about the links )";
    // Clearing textContent also removes any previously rendered keyword pills.
    cardKeyword.textContent = "";
}
/**
 * Fill the preview card from the scraped metadata object.
 *
 * Resets the card to placeholders first, then applies whichever fields are
 * present. Fixes from the previous version: `keywords`/`content` were implicit
 * globals, and `Array.map` was used purely for side effects (now `forEach`).
 */
const renderCard = (jsonData) => {
    resetCard();
    if (jsonData.img) {
        cardImg.src = jsonData.img;
        cardImg.classList.remove('hidden');
    }
    cardDesc.textContent = jsonData.desc;
    cardTitle.textContent = jsonData.title;
    // Prefer the site name, then the resolved URL, then the original URL.
    cardUrl.textContent = jsonData.sitename || jsonData.url || jsonData.orgURL;
    cardUrl.href = jsonData.url || jsonData.orgURL;
    if (jsonData.keywords) {
        // Render each comma-separated keyword as a pill-style list item.
        jsonData.keywords.split(",").forEach(key => {
            const li = document.createElement('li');
            li.textContent = key.trim();
            li.classList.add("bg-red-400", "px-2", "py-1", "rounded-md");
            cardKeyword.appendChild(li);
        });
    }
}
| 27.41791 | 79 | 0.534023 | 3.03125 |
c66e49a060bfee068df065edc564071652396da9
| 6,803 |
py
|
Python
|
captcha_solver_object_detection/captcha_solver.py
|
gabrielmvas/captcha-solver
|
94ec61ba9219fcef316d907257a5d42b20a90944
|
[
"MIT"
] | null | null | null |
captcha_solver_object_detection/captcha_solver.py
|
gabrielmvas/captcha-solver
|
94ec61ba9219fcef316d907257a5d42b20a90944
|
[
"MIT"
] | null | null | null |
captcha_solver_object_detection/captcha_solver.py
|
gabrielmvas/captcha-solver
|
94ec61ba9219fcef316d907257a5d42b20a90944
|
[
"MIT"
] | null | null | null |
from object_detection.utils import label_map_util, visualization_utils as vis_util
import tensorflow as tf
import pandas as pd
import numpy as np
import cv2 as cv
import sys
import os
import collections
from pathlib import Path
BASE_DIR = Path(__file__).parent
class CaptchaSolver(object):
    """Solve image captchas with a frozen TF1 object-detection model.

    Each character in the captcha is detected as a separate box; boxes are
    sorted left-to-right and their class labels concatenated into the answer.
    """

    def __init__(self):
        # 36 classes — presumably 0-9 plus a-z; TODO confirm against labelmap.
        self.num_classes = 36
        self.labels_path = str((BASE_DIR / 'model/labelmap.pbtxt').resolve())
        self.modelckpt_path = str((BASE_DIR / 'model/frozen_inference_graph.pb').resolve())
        # Minimum detection score for a box to be kept.
        self.tolerance = 0.6
        # Populated lazily by __load_tfmodel().
        self.model = None
        self.detection_graph = None

    def __load_label_map(self):
        """Load the labelmap and return a {class_id: {'name': ...}} index."""
        label_map = label_map_util.load_labelmap(self.labels_path)
        categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=self.num_classes, use_display_name=True)
        category_index = label_map_util.create_category_index(categories)
        return category_index

    def __load_tfmodel(self):
        """Load the frozen inference graph and open a session on it."""
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(self.modelckpt_path , 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
            self.model = tf.Session(graph=self.detection_graph)

    def get_boxes_coordinates(self, image, boxes, classes, scores, category_index, instance_masks=None, instance_boundaries=None, keypoints=None, use_normalized_coordinates=False, max_boxes_to_draw=6, min_score_thresh=.5, agnostic_mode=False, line_thickness=4, groundtruth_box_visualization_color='black', skip_scores=False, skip_labels=False):
        """Convert normalized detection boxes into pixel coordinates.

        Mirrors the bookkeeping of TF's visualization utils (labels/colors per
        box) but instead of drawing, returns a list of dicts with integer
        xmin/xmax/ymin/ymax pixel coordinates, one per box above the score
        threshold (at most ``max_boxes_to_draw``).
        """
        box_to_display_str_map = collections.defaultdict(list)
        box_to_color_map = collections.defaultdict(str)
        box_to_instance_masks_map = {}
        box_to_instance_boundaries_map = {}
        box_to_score_map = {}
        box_to_keypoints_map = collections.defaultdict(list)
        if not max_boxes_to_draw:
            max_boxes_to_draw = boxes.shape[0]
        for i in range(min(max_boxes_to_draw, boxes.shape[0])):
            if scores is None or scores[i] > min_score_thresh:
                box = tuple(boxes[i].tolist())
                if instance_masks is not None:
                    box_to_instance_masks_map[box] = instance_masks[i]
                if instance_boundaries is not None:
                    box_to_instance_boundaries_map[box] = instance_boundaries[i]
                if keypoints is not None:
                    box_to_keypoints_map[box].extend(keypoints[i])
                if scores is None:
                    box_to_color_map[box] = groundtruth_box_visualization_color
                else:
                    display_str = ''
                    if not skip_labels:
                        if not agnostic_mode:
                            if classes[i] in category_index.keys():
                                class_name = category_index[classes[i]]['name']
                            else:
                                class_name = 'N/A'
                            display_str = str(class_name)
                    if not skip_scores:
                        if not display_str:
                            display_str = '{}%'.format(int(100*scores[i]))
                        else:
                            display_str = '{}: {}%'.format(display_str, int(100*scores[i]))
                    box_to_display_str_map[box].append(display_str)
                    box_to_score_map[box] = scores[i]
                    if agnostic_mode:
                        box_to_color_map[box] = 'DarkOrange'
                    else:
                        box_to_color_map[box] = vis_util.STANDARD_COLORS[classes[i] % len(vis_util.STANDARD_COLORS)]
        coordinates_list = []
        for box, color in box_to_color_map.items():
            # Boxes are normalized [0, 1]; scale by the image's pixel size.
            ymin, xmin, ymax, xmax = box
            height, width, channels = image.shape
            coordinate = dict(xmin=int(),xmax=int(),ymin=int(),ymax=int())
            coordinate['ymin'] = int(ymin*height)
            coordinate['ymax'] = int(ymax*height)
            coordinate['xmin'] = int(xmin*width)
            coordinate['xmax'] = int(xmax*width)
            coordinates_list.append(coordinate)
        return coordinates_list

    def predict_captcha(self, image_path):
        """Run detection on the image at ``image_path`` and return the solved
        captcha string (detected characters ordered left-to-right)."""
        self.__load_tfmodel()
        image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
        # Model expects a batched RGB image.
        image = cv.imread(image_path)
        image_rgb = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        image_expanded = np.expand_dims(image_rgb, axis=0)
        (boxes, scores, classes, num) = self.model.run([detection_boxes, detection_scores, detection_classes, num_detections],
                                                       feed_dict={image_tensor: image_expanded})
        category_index = self.__load_label_map()
        coordinates = self.get_boxes_coordinates(image, np.squeeze(boxes), np.squeeze(classes).astype(np.int32), np.squeeze(scores), category_index, min_score_thresh=self.tolerance)
        digits = self.__get_digits_prediction(category_index, (boxes, scores, classes, num), coordinates)
        solved_captcha = self.__get_solved_captcha(digits)
        return solved_captcha

    def __get_digits_prediction(self, category_index, model_output, coordinates, threshold=0.6):
        """Pair detections above ``threshold`` with their pixel coordinates and
        return them sorted left-to-right by xmin."""
        digits = []
        for x in range(len(model_output[1][0])):
            if model_output[1][0][x] > threshold:
                digits.append(dict(label=category_index[model_output[2][0][x]]['name'], score=float(model_output[1][0][x]),
                                   coordenadas=coordinates[x], xmin=coordinates[x]['xmin']))
        return sorted(digits, key=lambda digit:digit['xmin'])

    def __get_solved_captcha(self, digits):
        """Concatenate the sorted digit labels into the final captcha string."""
        solved_captcha = ''
        for digit in digits:
            solved_captcha = solved_captcha + digit['label']
        return solved_captcha
| 47.908451 | 344 | 0.593121 | 3.140625 |
af9003a53d4c4f8783b32f064683910bb226ed82
| 8,946 |
py
|
Python
|
shipt/receipts.py
|
dcalacci/shipt-calculator
|
b53f3d09594f9ed0b0f950f8646b9ba5dae94fd9
|
[
"MIT"
] | 4 |
2021-03-14T21:43:30.000Z
|
2021-11-17T10:53:10.000Z
|
shipt/receipts.py
|
dcalacci/shipt-calculator
|
b53f3d09594f9ed0b0f950f8646b9ba5dae94fd9
|
[
"MIT"
] | 3 |
2021-01-20T17:34:13.000Z
|
2021-09-13T17:05:04.000Z
|
shipt/receipts.py
|
dcalacci/shipt-calculator
|
b53f3d09594f9ed0b0f950f8646b9ba5dae94fd9
|
[
"MIT"
] | null | null | null |
import argparse
import glob
import math
import os
import random
import string
import tempfile
from datetime import datetime

import cv2
import numpy as np
import pandas as pd
import pytesseract
from PIL import Image, ImageEnhance, ImageFilter
def strip_punc(s):
    """Return ``s`` with every ASCII punctuation character removed."""
    table = str.maketrans('', '', string.punctuation)
    return s.translate(table)
def image_to_text(image_filename):
    """OCR a receipt screenshot and return the extracted text.

    The image is converted to grayscale, upscaled 2x for better OCR accuracy,
    written to a temporary PNG and fed to tesseract (--psm 6 treats the image
    as a single uniform block of text).

    The previous version built the temp filename from ``os.getpid()`` plus a
    small random number, which could collide across concurrent runs;
    ``tempfile.mkstemp`` creates a collision-safe file instead.
    """
    image = cv2.imread(image_filename)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # scale 2x
    gray = cv2.resize(gray, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
    fd, filename = tempfile.mkstemp(suffix=".png")
    os.close(fd)
    try:
        cv2.imwrite(filename, gray)
        text = pytesseract.image_to_string(Image.open(filename),
                                           config='--psm 6')
    finally:
        # always clean up the temp file, even if OCR fails
        os.remove(filename)
    return text
def is_number(s):
    """Return True when ``s`` can be parsed as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def bad_entries_to_na(data):
    """Replace unparseable numeric/time fields of ``data`` with NaN, in place.

    Only the known money/time keys are inspected; AM/PM suffixes are stripped
    before the numeric check. Returns the (mutated) dict.
    """
    numeric_keys = ('delivery_window_start', 'delivery_window_end',
                    'order_pay', 'order_total', 'tip', 'total_pay')
    for key, value in data.items():
        if key not in numeric_keys:
            continue
        cleaned = str(value).replace("PM", "").replace("AM", "")
        if not is_number(cleaned):
            data[key] = np.nan
    return data
def to_number_or_none(x):
    """Coerce ``x`` to a number via ``pd.to_numeric``; return None on failure.

    The previous bare ``except`` also swallowed KeyboardInterrupt/SystemExit;
    only the parse failures pandas actually raises are caught now.
    """
    try:
        return pd.to_numeric(x)
    except (ValueError, TypeError):
        return None
def guess_better_numbers(data):
    """ Guesses better numbers if the order pay, tip, promo pay, and total pay don't add up.
    also creates a new order number from the pay #s if it's been parsed wrong. We check if the order
    number has been parsed wrong by testing if it's parse-able into a number.

    new order # is: <tip><totalpay><delivery day-of-month>

    Mutates and returns ``data``. Fixes from the previous version: the
    ``order_pay`` reconciliation used ``==`` (a no-op comparison) where an
    assignment was intended, and ``None`` checks now use ``is None``.
    """
    if 'tip' not in data or 'order_pay' not in data or 'promo_pay' not in data:
        return data
    data['tip'] = to_number_or_none(data['tip'])
    data['total_pay'] = to_number_or_none(data['total_pay'])
    data['order_pay'] = to_number_or_none(data['order_pay'])
    data['promo_pay'] = to_number_or_none(data['promo_pay'])
    if (data['order_pay'] is None or data['total_pay'] is None):
        return data
    # OCR sometimes drops the decimal point; shift right until plausible.
    while data['order_pay'] > data['total_pay']:
        data['order_pay'] = round(data['order_pay'] / 10, 2)
    while data['tip'] > data['total_pay']:
        data['tip'] = round(data['tip'] / 10, 2)
    if data['tip'] + data['order_pay'] + data['promo_pay'] != data['total_pay']:
        # the order pay is *always* a decimal, basically. if it's not,
        # it almost definitely means it was parsed incorrectly.
        # breaks, obviously, when tip is greater than promo pay, but that
        # rarely happens.
        if data['order_pay'] > data['total_pay']:
            # was `==` (no-op); assign the reconciled value as intended
            data['order_pay'] = data['total_pay'] - \
                data['promo_pay'] - data['tip']
        elif data['total_pay'] > data['order_pay'] + data['tip'] + data['promo_pay']:
            data['total_pay'] = data['order_pay'] + \
                data['tip'] + data['promo_pay']
        elif data['tip'] > data['total_pay']:
            data['tip'] = data['total_pay'] - \
                data['order_pay'] - data['promo_pay']

    # order number: rebuilt from the pay figures when the OCR'd one is not
    # a parseable integer
    new_ordernum = "{}{}{}".format(
        int(data['tip']),
        int(data['total_pay']),
        data['delivery_date'].split('/')[1])
    if 'order_number' not in data:
        data['order_number'] = new_ordernum
    else:
        try:
            ordernum = int(data['order_number'])
        except ValueError:
            data['order_number'] = new_ordernum
    return data
def receipt_to_df(image_filename, verbose=False):
    """OCR a Shipt receipt screenshot and return a DataFrame of orders.

    The OCR text is scanned line by line; lines starting with known keywords
    (Window/Delivery/Delivered/Order/Tip/Promo/Total) populate one order dict,
    which is appended once it has an order number and total, then reset.
    Numbers are reconciled via ``guess_better_numbers``.

    Fix from the previous version: leftover "ORDER TOTAL" debug prints that
    ran unconditionally (even without ``verbose``) were removed.
    """
    text = image_to_text(image_filename)

    # small line-parsing helpers
    def fword(s): return s.split(" ")[0]
    def line_items(s): return s.split(" ")

    all_data = []
    data = {}
    if verbose:
        print("-----------------")
        print(image_filename)
    lines = text.split("\n")
    for n, line in enumerate(lines):
        first_word = fword(line)
        if verbose:
            print(line_items(line))
            print('window' in line.lower())
            print(line.lower())
            print(first_word)
        if (first_word == 'Window'):
            # newer receipt format: "Window <Mon> <day,> <start-end>"
            if (verbose):
                print("new delivery window parsing...")
            lm = line_items(line)
            month = lm[1]
            day = lm[2]
            times = lm[3].split("-")
            # assume current year
            year = datetime.today().year
            date = datetime.strptime("{} {} {}".format(
                month, day, year), "%b %d, %Y")
            datestr = datetime.strftime(date, "%m/%d/%Y")
            data['delivery_window_start'] = times[0]
            data['delivery_window_end'] = times[1]
            data['delivery_date'] = datestr
            data['delivered_date'] = datestr
        elif (first_word == 'Delivery'):
            if line_items(line)[1] == "Only":
                data["delivery_only"] = True
                continue
            elif "delivery_only" not in data:
                data["delivery_only"] = False
            if 'window' in line.lower():
                # older format: "Delivery window: <date>: <start> to <end>"
                if (verbose):
                    print("delivery window parsing...", line, lines[n+1])
                if len(line_items(line)) == 2:
                    # window info wrapped onto the next line — merge them
                    line = " ".join([line, lines[n+1]])
                delivery_window = line.split(":")[-1]  # gets the XX to XX
                data["delivery_date"] = line_items(line)[2].split(":")[
                    0].strip()  # removes colon
                data["delivery_window_start"] = strip_punc(
                    delivery_window.split("to")[0].strip())
                data["delivery_window_end"] = strip_punc(
                    delivery_window.split("to")[1].strip())
        elif (first_word == 'Delivered'):
            data["delivered_date"] = line_items(line)[1][:-1]  # removes comma
            if data["delivered_date"] == "Today":
                data["delivered_date"] = datetime.now().strftime("%m/%d/%Y")
            data["delivered_time"] = " ".join(
                line_items(line)[2:])  # merges time and AM/PM
        elif (first_word == 'Order' and line_items(line)[1] == 'Pay'):
            data["order_pay"] = line_items(
                line)[2][1:].strip().replace(",", "")
        elif (first_word == 'Order'):
            # new cards only have 4 on this line
            data["order_number"] = strip_punc(
                line_items(line)[1])  # remove hash
            # Some of them don't have an order total
            if len(line_items(line)) >= 4:
                # sometimes it picks up the dot between total and the amount as a plus
                # or an arrow but sometimes, it doesn't. If it has this, it's pretty
                # likely there's an order total.
                if (any(c in line_items(line)[2].strip() for c in ["+", "*", "-", "»", "«"]) or
                        len(line_items(line)[2]) == 1):
                    data["order_total"] = line_items(
                        line)[3][1:].strip().replace(",", "")
                else:
                    data["order_total"] = line_items(
                        line)[2][1:].strip().replace(",", "")
            else:
                data["order_total"] = np.nan
            if "Time" in line_items(line)[-1]:
                data["late"] = False
            elif "Late" in line_items(line)[-1]:
                data["late"] = True
        elif (first_word == "Tip"):
            data["tip"] = line_items(line)[-1][1:]
        elif (first_word == 'Promo'):
            data["promo_pay"] = line_items(line)[2][1:].strip()
        elif (first_word == "Total"):
            data["total_pay"] = line_items(
                line)[2][1:].strip().replace(",", "")
        if 'promo_pay' not in data:
            data['promo_pay'] = 0
        data["filename"] = image_filename
        data = guess_better_numbers(data)
        ## make sure order_total is in there too -- if it's not, we can't use it as data.
        if 'order_number' in data and data['order_number'] != '' and 'order_total' in data:
            if verbose:
                print("Adding", data, "to all_data...")
            all_data.append(data)
            data = {}
        else:
            continue
    df = pd.DataFrame(all_data)
    df["date_submitted"] = datetime.now().strftime("%m/%d/%Y")
    # coerce money columns to numbers (unparseable values become None)
    df[["order_pay", "tip", "order_total", "total_pay", "promo_pay"]] = df[["order_pay", "tip",
        "order_total", "total_pay", "promo_pay"]].apply(to_number_or_none)
    return df.fillna("")
| 39.409692 | 142 | 0.533646 | 3.28125 |
81b86e64e6951416c487d33c6c9efa699bcd2df4
| 2,168 |
swift
|
Swift
|
Sources/SoundpipeAudioKit/Effects/VariableDelay.swift
|
Moriquendi/SoundpipeAudioKit
|
086653b852094586f11c84d48286beae1e2b0929
|
[
"MIT"
] | 30 |
2021-05-21T14:09:12.000Z
|
2022-03-17T16:12:09.000Z
|
Sources/SoundpipeAudioKit/Effects/VariableDelay.swift
|
Moriquendi/SoundpipeAudioKit
|
086653b852094586f11c84d48286beae1e2b0929
|
[
"MIT"
] | 12 |
2021-05-29T06:56:08.000Z
|
2022-01-29T15:59:06.000Z
|
Sources/SoundpipeAudioKit/Effects/VariableDelay.swift
|
Moriquendi/SoundpipeAudioKit
|
086653b852094586f11c84d48286beae1e2b0929
|
[
"MIT"
] | 19 |
2021-06-10T03:30:07.000Z
|
2022-03-22T16:48:04.000Z
|
// Copyright AudioKit. All Rights Reserved. Revision History at http://github.com/AudioKit/AudioKit/
// This file was auto-autogenerated by scripts and templates at http://github.com/AudioKit/AudioKitDevTools/
import AVFoundation
import AudioKit
import AudioKitEX
import CSoundpipeAudioKit
/// A delay line with cubic interpolation.
/// A delay line with cubic interpolation.
public class VariableDelay: Node {
    /// Upstream node whose signal is delayed.
    let input: Node

    /// Connected nodes
    public var connections: [Node] { [input] }

    /// Underlying AVAudioNode
    public var avAudioNode = instantiate(effect: "vdla")

    // MARK: - Parameters

    /// Specification details for time
    public static let timeDef = NodeParameterDef(
        identifier: "time",
        name: "Delay time (Seconds)",
        address: akGetParameterAddress("VariableDelayParameterTime"),
        defaultValue: 0,
        range: 0 ... 10,
        unit: .seconds)

    /// Delay time (in seconds) This value must not exceed the maximum delay time.
    @Parameter(timeDef) public var time: AUValue

    /// Specification details for feedback
    public static let feedbackDef = NodeParameterDef(
        identifier: "feedback",
        name: "Feedback (%)",
        address: akGetParameterAddress("VariableDelayParameterFeedback"),
        defaultValue: 0,
        range: 0 ... 1,
        unit: .generic)

    /// Feedback amount. Should be a value between 0-1.
    @Parameter(feedbackDef) public var feedback: AUValue

    // MARK: - Initialization

    /// Initialize this delay node
    ///
    /// - Parameters:
    ///   - input: Input node to process
    ///   - time: Delay time (in seconds) This value must not exceed the maximum delay time.
    ///   - feedback: Feedback amount. Should be a value between 0-1.
    ///   - maximumTime: The maximum delay time, in seconds.
    ///
    public init(
        _ input: Node,
        time: AUValue = timeDef.defaultValue,
        feedback: AUValue = feedbackDef.defaultValue,
        maximumTime: AUValue = 5
    ) {
        self.input = input

        setupParameters()

        // The DSP's maximum delay is fixed before the initial `time` is
        // applied; behavior for `time` > `maximumTime` is enforced by the
        // DSP side — NOTE(review): confirm it clamps rather than wraps.
        akVariableDelaySetMaximumTime(au.dsp, maximumTime)

        self.time = time
        self.feedback = feedback
    }
}
| 30.111111 | 108 | 0.654059 | 3 |
2c62f406122f411034e72c66b8c2b1d7643ab8be
| 5,239 |
py
|
Python
|
Experiments/CodeBert_CodeToText/Preprocessing/jsonl_to_java.py
|
wubero/Lampion
|
8a81b3381dee48ffab8cf7ee1b57e0eea8aaeba2
|
[
"MIT"
] | 1 |
2022-02-20T11:42:18.000Z
|
2022-02-20T11:42:18.000Z
|
Experiments/CodeBert_CodeToText/Preprocessing/jsonl_to_java.py
|
ciselab/Lampion
|
ba457d152a83e9b58072ec4676cc340b5b5afb1b
|
[
"MIT"
] | 28 |
2021-12-06T07:10:27.000Z
|
2022-03-25T09:42:51.000Z
|
Experiments/CodeBert_CodeToText/Preprocessing/jsonl_to_java.py
|
wubero/Lampion
|
8a81b3381dee48ffab8cf7ee1b57e0eea8aaeba2
|
[
"MIT"
] | 2 |
2021-11-25T08:32:45.000Z
|
2022-02-20T11:42:27.000Z
|
import jsonlines # For adressing json values as dictionaries in jsonl files
import os # For File/Directory Creation
import sys # For handling command args
"""
Notes:
The filename needs to be class+function as otherwise they will overwrite each other
"""
#TODO: Add a bit of debug info
def readJsonl_andPrintAllToJava(jsonl_file: str, output_prefix:str = "./output/") -> ():
    """
    Reads a jsonl file and writes one .java file per entry found, adding the
    remaining json attributes as a comment header in each file.
    :param jsonl_file: the filepath to look for the jsonl
    :return: nothing, creates one java file per line found
    """
    counter = 0
    with jsonlines.open(jsonl_file) as reader:
        for entry in reader.iter():
            write_to_file(entry, output_prefix)
            counter += 1
    print(f"Wrote {counter} Entries from the JSONL file {jsonl_file} to .java files in {output_prefix}.")
def write_to_file(line: dict, output_prefix: str = "./output/") -> ():
    """
    Write one jsonl entry to <output_prefix>/<path>/<Class>_<func>.java,
    creating intermediate directories as needed.

    Fixes from the previous version: the file handle is managed by a context
    manager (was a bare open/close that leaked on write errors), paths are
    joined with os.path.join, and the parameter annotation is corrected —
    ``line`` is the parsed json record (a dict), not a str.

    :param line: one parsed jsonl record
    :param output_prefix: the directory to write all files into
    :return: nothing, creates one file
    """
    # Read all relevant position information from the jsonl
    (path, filename, package, classname) = split_path_to_parts(line['path'])
    # Create the directories if necessary
    target_dir = os.path.join(output_prefix, path)
    os.makedirs(target_dir, exist_ok=True)
    # write the wrapped java class; the context manager closes the handle
    func = line['func_name'].split('.')[1]
    target_file = os.path.join(target_dir, f"{classname}_{func}.java")
    with open(target_file, "w") as f:
        f.write(wrap_in_class_and_package(line))
# This var simply holds all seen combinations of function and class names
# It is necessary as some methods are overloaded and would result in the same file
# Which results in issues with the java obfuscation which fails on duplicate java classes
# NOTE: module-level mutable state — wrap_in_class_and_package is not
# safe to call from multiple processes over the same output.
seen_class_names = []

def wrap_in_class_and_package(line):
    """
    Wraps the content of the given line into a java package+class+markup_info
    Required as the entries are on function level,but the obfuscator runs on class level.
    This method does not write to a file, it simply alters the dictionary to a string.
    TODO: Maybe better format for markups, maybe move header up before the package
    :param line: the line of jsonl to be wrapped in a .java file with markup words for other attributes
    :return: the line wrapped in package and class
    """
    (path, file, package, classname) = split_path_to_parts(line['path'])
    func = line['func_name'].split('.')[1]
    # There was an issue on the java-obfuscation side that had troubles with duplicate java classes
    # This was due to the naming here, especially for overloaded functions
    # Hence, if there was a method already seen, just add a counter at the end of the classname to be unique
    final_classname=f"{classname}_{func}"
    counter = 2
    while(final_classname in seen_class_names):
        final_classname=f"{classname}_{func}_{counter}"
        counter = counter + 1
    seen_class_names.append(final_classname)
    # Python has escape, but double { are the {-escape
    # The ur_* markers delimit the original dataset attributes so a later
    # step can recover them from the generated java file.
    filecontent = f"""
package {package};
/*
python_helper_header_start
ur_repo {line['repo']} ur_repo
ur_url {line['url']} ur_url
ur_path {line['path']} ur_path
ur_func_name {line['func_name']} ur_func_name
ur_docstring {(line['docstring']).encode('utf-8')} ur_docstring
ur_doctokens {line['docstring_tokens']} ur_doctokens
ur_sha {line['sha']} ur_sha
ur_partition {line['partition']} ur_partition
python_helper_header_end
*/
public class {final_classname} {{
{line['code']}
}}
"""
    return filecontent
def split_path_to_parts(path: str):
    """
    Helper to separate a java file path into its information pieces.

    :param path: the path of a java file, including the file itself
    :return: a tuple (directory path, file name, package name derived from
             the path, class name derived from the file name)
    """
    *dirs, file_name = path.split('/')
    package = ".".join(dirs)
    directory = "/".join(dirs)
    classname = file_name.split(".")[0]
    return (directory, file_name, package, classname)
if __name__ == '__main__':
    # Note: the args start at 1, because sys.argv[0] is the script name itself.
    # Usage: script [input.jsonl] [output_dir]
    print("Running JsonL to Java ")
    if len(sys.argv) == 1:
        print("received no arguments - trying to default to 'java.jsonl' and writing to 'output'")
        readJsonl_andPrintAllToJava('java.jsonl')
    elif len(sys.argv) == 2:
        print(f"Trying to read {sys.argv[1]}, writing to 'output'")
        readJsonl_andPrintAllToJava(sys.argv[1])
    elif len(sys.argv) == 3:
        print(f"Trying to read {sys.argv[1]}, writing to {sys.argv[2]}")
        readJsonl_andPrintAllToJava(sys.argv[1], sys.argv[2])
    else:
        # Typo fix: "unkown" -> "unknown".
        print("Received an unknown number of arguments - aborting")
| 41.912 | 111 | 0.683718 | 3.3125 |
f95562523b9fa2158501687fbdb772073ea275a2
| 6,235 |
go
|
Go
|
utils/wal.go
|
homingway/hickwall
|
2e1063aa3cf5eeee29adc3a494c3633ec8fc4e0c
|
[
"Apache-2.0"
] | 2 |
2016-10-21T12:24:49.000Z
|
2017-03-30T06:04:51.000Z
|
utils/wal.go
|
homingway/hickwall
|
2e1063aa3cf5eeee29adc3a494c3633ec8fc4e0c
|
[
"Apache-2.0"
] | null | null | null |
utils/wal.go
|
homingway/hickwall
|
2e1063aa3cf5eeee29adc3a494c3633ec8fc4e0c
|
[
"Apache-2.0"
] | 1 |
2019-01-14T07:09:16.000Z
|
2019-01-14T07:09:16.000Z
|
package utils
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
// "path"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
)
// Wal is a size-limited write-ahead log that rotates its backing file once
// it grows past max_size_kb, keeping at most max_rolls archived files.
type Wal struct {
	lock        sync.Mutex
	filename    string // should be set to the actual filename
	fp          *os.File
	max_size_kb int64 // max size in kb
	max_rolls   int
	stop        chan bool // stop chan
	is_index    bool
}

// Package-level reader state shared across Wal values.
// NOTE(review): being globals, these make concurrent use of multiple
// index-enabled Wals unsafe — confirm only one instance is used at a time.
var (
	islast        bool   // set when ReadLine reaches EOF of the current data file
	indexfilename string // "<filename>.index"; set by NewWal when is_index
	datafile      string // archive file the reader is currently consuming
	offset        int64  // byte offset into datafile
)
// func Split(path string) (dir, file string) {
// i := strings.LastIndex(path, "\\")
// return path[:i+1], path[i+1:]
// }
// func Join(elem ...string) string {
// for i, e := range elem {
// if e != "" {
// return path.Clean(strings.Join(elem[i:], ""))
// }
// }
// return ""
// }
// fileExists return flag whether a given file exists
// and operation error if an unclassified failure occurs.
func fileExists(path string) (bool, error) {
_, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}
// pos returns the index of value within slice, or -1 when it is absent.
func pos(value string, slice []string) int {
	for i := range slice {
		if slice[i] == value {
			return i
		}
	}
	return -1
}
// Make a new wal. Return nil if error occurs during setup.
// NewWal opens filename for appending, rotates once it exceeds max_size_kb,
// and keeps at most max_rolls archives. When is_index is set, read progress
// is tracked in "<filename>.index". A background goroutine watches the file
// size until Close is called.
func NewWal(filename string, max_size_kb int64, max_rolls int, is_index bool) (*Wal, error) {
	var lock sync.Mutex
	w := &Wal{filename: filename,
		lock:        lock,
		max_size_kb: max_size_kb,
		max_rolls:   max_rolls,
		stop:        make(chan bool),
		is_index:    is_index,
	}
	err := w.create_output(filename)
	if err != nil {
		return nil, err
	}
	if is_index {
		// NOTE(review): writes the package-level index filename — a second
		// index-enabled Wal would clobber the first one's index path.
		indexfilename = filename + ".index"
	}
	go w.watching_myself(w.stop)
	return w, nil
}
// Close signals the size-watching goroutine to exit.
// It does not close the underlying file handle.
func (w *Wal) Close() error {
	w.stop <- true
	return nil
}
// ListArchives returns every regular file in the wal's directory (including
// the active file and, when present, the index file), sorted
// lexicographically. Directory-read errors are ignored, yielding an empty
// slice.
func (w *Wal) ListArchives() []string {
	archives := []string{}
	dir, _ := filepath.Split(w.filename)
	files, _ := ioutil.ReadDir(dir)
	for _, file := range files {
		if file.IsDir() {
			continue
		}
		archives = append(archives, filepath.Join(dir, file.Name()))
	}
	// sort.Strings is the idiomatic form of sort.Sort(sort.StringSlice(...)).
	sort.Strings(archives)
	return archives
}
// Write satisfies the io.Writer interface.
// It appends output to the current active file under the wal's mutex.
func (w *Wal) Write(output []byte) (int, error) {
	w.lock.Lock()
	defer w.lock.Unlock()
	return w.fp.Write(output)
}
// WriteLine appends data plus a trailing newline to the log.
func (w *Wal) WriteLine(data string) (int, error) {
	return w.Write([]byte(data + "\n"))
}
// watching_myself polls the wal size once per second and rotates the file
// whenever it exceeds the configured limit. It exits when stop receives a
// value or the size check fails (e.g. the file was removed).
func (w *Wal) watching_myself(stop chan bool) {
	if stop == nil {
		panic("stop chan is nil")
	}
	// time.NewTicker instead of time.Tick: time.Tick leaks its ticker
	// forever, while this one is released when the goroutine exits.
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			currentSize, err := w.GetSizeKb()
			if err != nil {
				return
			}
			if currentSize > w.GetLimitKb() {
				// rotate.
				err := w.rotate()
				if err != nil {
					fmt.Println(err)
				}
			}
		case <-stop:
			return
		}
	}
}
// GetSizeKb returns the current size of the active file in whole kilobytes.
func (w *Wal) GetSizeKb() (int64, error) {
	fi, err := os.Stat(w.filename)
	if err != nil {
		return 0, err
	}
	return fi.Size() / 1024, nil
}
// GetLimitKb returns the configured rotation threshold in kilobytes.
func (w *Wal) GetLimitKb() int64 {
	return w.max_size_kb
}
// create_output (re)opens log_filepath for appending and stores the handle
// in w.fp, closing any previously open handle first.
func (w *Wal) create_output(log_filepath string) (err error) {
	w.lock.Lock()
	defer w.lock.Unlock()
	if w.fp != nil {
		w.fp.Close()
		w.fp = nil
	}
	// Original sliced log_filepath[:] (a no-op) and stored the handle before
	// checking the error, leaving a nil *os.File in w.fp on failure.
	output, err := os.OpenFile(log_filepath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return err
	}
	w.fp = output
	return
}
// Perform the actual act of rotating and reopening file.
// rotate closes the active file, renames it with a timestamp suffix, prunes
// the oldest archives beyond max_rolls, reopens a fresh active file and, for
// index-enabled wals, repoints the package-level reader at the renamed file.
func (w *Wal) rotate() (err error) {
	w.lock.Lock()
	defer w.lock.Unlock()
	// Close existing file if open
	if w.fp != nil {
		err = w.fp.Close()
		w.fp = nil
		if err != nil {
			return
		}
	}
	// Rename dest file if it already exists
	var newname string
	_, err = os.Stat(w.filename)
	if err == nil {
		newname = w.filename + "." + time.Now().Format("20060102_150405")
		err = os.Rename(w.filename, newname)
		if err != nil {
			return
		}
	}
	//remove over file
	// Prune oldest archives so at most max_rolls files remain.
	// NOTE(review): ListArchives also counts the active file and the index
	// file, so pruning may remove an archive earlier than expected — confirm.
	files := w.ListArchives()
	if len(files) > w.max_rolls {
		for _, file := range files[:len(files)-w.max_rolls] {
			os.Remove(file)
		}
	}
	// Create a file.
	// w.fp, err = os.Create(w.filename)
	w.fp, err = os.OpenFile(w.filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
	if w.is_index {
		datafile, _, err = w.GetIndex()
		if err == nil {
			// If the reader was on the (now renamed) active file, follow it.
			if datafile == "" || datafile == w.filename {
				datafile = newname
			}
			// NOTE(review): this := shadows the outer named err, so a
			// SetIndex failure is printed but not returned — confirm intended.
			err := w.SetIndex()
			if err != nil {
				fmt.Println(err)
			}
		}
	}
	return
}
// commit index.
// Commit persists reader progress: once EOF was reached (islast) the
// finished archive is deleted; otherwise the index entry is just rewritten.
func (w *Wal) Commit() (err error) {
	if islast == true {
		err = w.DeleteFinishedArchives()
		if err == nil {
			islast = false
		}
	}
	// Only rewrite the index when the tracked data file still exists.
	if exist, _ := fileExists(datafile); exist == true {
		w.SetIndex()
	}
	return
}
// DeleteFinishedArchives removes the archive the reader just finished and
// repoints the package-level reader state at the next file, resetting offset.
func (w *Wal) DeleteFinishedArchives() (err error) {
	w.lock.Lock()
	defer w.lock.Unlock()
	err = os.Remove(datafile)
	if err != nil {
		fmt.Println(err)
	}
	files := w.ListArchives()
	if len(files) >= 2 {
		// len-2 skips the last entry of the sorted listing.
		// NOTE(review): assumes the active/index file sorts last — confirm.
		datafile = files[len(files)-2]
	}
	offset = 0
	return
}
//read line refer to index
// ReadLine reads the next line from the file recorded in the index, resuming
// at the stored offset. On EOF it sets the package-level islast flag so the
// next Commit can delete the finished archive.
func (w *Wal) ReadLine() (line string, err error) {
	var (
		file   *os.File
		part   []byte
		prefix bool
	)
	datafile, offset, err = w.GetIndex()
	exist, err := fileExists(datafile)
	if datafile == "" || !exist {
		// No usable index entry: fall back to the newest listed file, or the
		// active file when the directory listing is empty.
		files := w.ListArchives()
		if len(files) >= 1 {
			datafile = files[len(files)-1]
		} else {
			datafile = w.filename
		}
	}
	if file, err = os.Open(datafile); err != nil {
		return
	}
	defer file.Close()
	// SEEK_CUR on a freshly opened file is equivalent to seeking from start.
	file.Seek(offset, os.SEEK_CUR)
	reader := bufio.NewReader(file)
	// NOTE(review): the buffer is pre-filled with 1024 zero bytes and the
	// offset update below subtracts that 1024 back out — fragile. Lines
	// longer than bufio's internal limit (prefix == true) are never
	// reassembled into `line`. Confirm against expected record sizes.
	buffer := bytes.NewBuffer(make([]byte, 1024))
	part, prefix, err = reader.ReadLine()
	buffer.Write(part)
	if !prefix {
		line = buffer.String()
		buffer.Reset()
	}
	if err == io.EOF {
		islast = true
		return
	}
	offset += int64(len(line) - 1024 + len("\n"))
	return
}
//set index
// SetIndex persists "datafile|offset" to the index file and returns any
// write error.
func (w *Wal) SetIndex() error {
	newindex := datafile + "|" + strconv.FormatInt(offset, 10)
	// 0644 instead of 0: a permission of 0 creates an index file that cannot
	// be re-opened on the next run. Also propagate the write error instead
	// of silently dropping it.
	return ioutil.WriteFile(indexfilename, []byte(newindex), 0644)
}
//get index
// GetIndex loads "datafile|offset" from the index file. A missing index file
// yields ("", 0, nil). A malformed or unreadable entry now yields an error
// instead of panicking on a missing '|' separator or silently ignoring the
// ParseInt failure (both defects in the original).
func (w *Wal) GetIndex() (string, int64, error) {
	if exist, _ := fileExists(indexfilename); exist != true {
		return "", 0, nil
	}
	buf, err := ioutil.ReadFile(indexfilename)
	if err != nil {
		return "", 0, err
	}
	index := string(buf)
	ss := strings.Split(index, "|")
	if len(ss) != 2 {
		return "", 0, fmt.Errorf("malformed index entry: %q", index)
	}
	off, err := strconv.ParseInt(ss[1], 10, 64)
	if err != nil {
		return "", 0, err
	}
	return ss[0], off, nil
}
| 18.836858 | 93 | 0.615557 | 3.3125 |
44d3312e0e689e20afc922d73b6c979b13bbce15
| 1,802 |
lua
|
Lua
|
luigi/hooker.lua
|
poke1024/luigi
|
a909e037a5e8464ea33be305081408470b08bd3e
|
[
"MIT"
] | 119 |
2015-10-22T00:19:02.000Z
|
2022-03-13T15:54:19.000Z
|
luigi/hooker.lua
|
poke1024/luigi
|
a909e037a5e8464ea33be305081408470b08bd3e
|
[
"MIT"
] | 87 |
2019-03-28T03:35:16.000Z
|
2021-12-18T01:21:17.000Z
|
luigi/hooker.lua
|
poke1024/luigi
|
a909e037a5e8464ea33be305081408470b08bd3e
|
[
"MIT"
] | 24 |
2015-10-22T00:19:05.000Z
|
2022-02-02T07:04:05.000Z
|
-- Hooker chains multiple hook functions onto a method slot of a host table.
-- Hooks per (host, key) form a doubly linked list; the wrapped method runs
-- them in order until one returns a non-nil value, which becomes the result.
local Hooker = {}

-- Weak-keyed caches so hosts can still be garbage collected.
local wrapped = setmetatable({}, { __mode = 'k' }) -- host -> marker that host[key] was wrapped
local hooks = setmetatable({}, { __mode = 'k' })   -- host -> { [key] = head of hook list }

-- Unlink item from its hook list, fixing the stored head when item was first.
local function unhook (item)
    if item.prev then
        item.prev.next = item.next
    end
    if item.next then
        item.next.prev = item.prev
    end
    if hooks[item.host][item.key] == item then
        hooks[item.host][item.key] = item.next
    end
end

-- Insert func as a hook for host[key]; at the head by default, or at the
-- tail when atEnd is truthy. Returns the list item (which carries :unhook()).
local function hook (host, key, func, atEnd)
    if not func then
        return
    end
    if not hooks[host] then
        hooks[host] = {}
    end
    local current = hooks[host][key]
    local item = {
        next = not atEnd and current or nil,
        unhook = unhook,
        host = host,
        key = key,
        func = func,
    }
    if atEnd then
        if current then
            -- Walk to the tail and append.
            while current.next do
                current = current.next
            end
            current.next = item
            item.prev = current
        else
            hooks[host][key] = item
        end
        return item
    end
    -- Head insertion.
    if current then
        current.prev = item
    end
    hooks[host][key] = item
    return item
end

-- Public wrapper around the local unhook.
function Hooker.unhook (item)
    return unhook(item)
end

-- Public entry point. On first use for (host, key) the original method is
-- captured as the tail hook and host[key] is replaced by a dispatcher that
-- calls each hook until one returns non-nil.
function Hooker.hook (host, key, func, atEnd)
    if not wrapped[host] then
        wrapped[host] = {}
    end
    if not wrapped[host][key] then
        wrapped[host][key] = true
        hook(host, key, host[key])
        host[key] = function (...)
            local item = hooks[host][key]
            while item do
                local result = item.func(...)
                if result ~= nil then
                    return result
                end
                item = item.next
            end -- while
        end -- function
    end -- if
    return hook(host, key, func, atEnd)
end

return Hooker
| 19.586957 | 50 | 0.523307 | 3.03125 |
2c4203c788bae20329c3c432c52b90e0ed71b658
| 2,474 |
py
|
Python
|
api/tests/audit_log/favourite/test_get_favourite_logs.py
|
nzediegwu1/favourites-backend
|
ad3587795d1dd44d350879364d443c43b727ca0c
|
[
"MIT"
] | 2 |
2019-08-05T08:03:05.000Z
|
2021-10-03T16:36:31.000Z
|
api/tests/audit_log/favourite/test_get_favourite_logs.py
|
nzediegwu1/favourites-backend
|
ad3587795d1dd44d350879364d443c43b727ca0c
|
[
"MIT"
] | 8 |
2020-02-12T00:56:35.000Z
|
2022-02-10T12:12:31.000Z
|
api/tests/audit_log/favourite/test_get_favourite_logs.py
|
nzediegwu1/favorite-things
|
ad3587795d1dd44d350879364d443c43b727ca0c
|
[
"MIT"
] | null | null | null |
from rest_framework.test import APITestCase
from api.models import Category
from api.tests.mocks import brite_core, brite_core_update
class TestGetFavouriteAuditLog(APITestCase):
    """Tests for the /favourites/<id>/logs audit endpoint.

    setUp creates, updates and deletes one favourite, so each test starts
    with exactly three audit entries (create / update / delete).
    """

    # Overwritten per-test in setUp with the id of the created favourite.
    favourite_id = 0

    def setUp(self):
        category = Category.objects.create(name='Company')
        response = self.client.post('/favourites',
                                    brite_core(category.id),
                                    format='json')
        self.favourite_id = response.data['id']
        url = f'/favourites/{self.favourite_id}'
        self.client.put(url, brite_core_update(category.id), format='json')
        self.client.delete(url)

    def test_get_audit_log_succeeds_with_existing_favourite_id(self):
        # Logs come back newest first: delete, update, create.
        response = self.client.get(f'/favourites/{self.favourite_id}/logs')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['message'], 'BriteCore-Audit Log')
        self.assertEqual(len(response.data['data']), 3)
        # Delete entry: full "before" snapshot, empty "after".
        delete_audit_log = response.data['data'][0]
        self.assertEqual(delete_audit_log['action'], 'delete')
        self.assertEqual(delete_audit_log['resource_id'], self.favourite_id)
        self.assertEqual(delete_audit_log['model'], 'favourite')
        self.assertEqual(delete_audit_log['before']['title'], 'BriteCore')
        self.assertEqual(delete_audit_log['after'], {})
        # Update entry: both changed fields captured in before/after.
        update_audit_log = response.data['data'][1]
        self.assertEqual(update_audit_log['action'], 'update')
        self.assertEqual(update_audit_log['before']['title'], 'Brite Core')
        self.assertEqual(update_audit_log['after']['title'], 'BriteCore')
        self.assertEqual(update_audit_log['before']['ranking'], 3)
        self.assertEqual(update_audit_log['after']['ranking'], 10)
        # Create entry: empty "before", timestamps present in "after".
        creation_audit_log = response.data['data'][2]
        self.assertEqual(creation_audit_log['action'], 'create')
        self.assertEqual(creation_audit_log['before'], {})
        self.assertEqual(creation_audit_log['after']['title'], 'Brite Core')
        self.assertTrue('created_date' in creation_audit_log['after'])
        self.assertTrue('modified_date' in creation_audit_log['after'])

    def test_get_audit_log_fails_with_unexisting_favourite_id(self):
        response = self.client.get(f'/favourites/4343434/logs')
        self.assertEqual(response.status_code, 404)
        self.assertEqual(response.data['detail'],
                         'favourite with pk 4343434, does not exist')
| 48.509804 | 76 | 0.672999 | 3.109375 |
beb8cbd40a93ab87dc2f1b9e51d04f455cb17472
| 1,999 |
ts
|
TypeScript
|
hooks/useWallet.ts
|
unit410/polyfile
|
1b9164606090209ef317caa13e71c43cb536aa93
|
[
"BlueOak-1.0.0"
] | null | null | null |
hooks/useWallet.ts
|
unit410/polyfile
|
1b9164606090209ef317caa13e71c43cb536aa93
|
[
"BlueOak-1.0.0"
] | null | null | null |
hooks/useWallet.ts
|
unit410/polyfile
|
1b9164606090209ef317caa13e71c43cb536aa93
|
[
"BlueOak-1.0.0"
] | null | null | null |
import { useEffect, useState } from 'react';
import type FilecoinApp from '@zondax/ledger-filecoin';
import Address from '~/common/Address';
import Wallet, { SignResponse, WalletAddress, WalletError } from '~/common/Wallet';
import { useLedger } from '~/components/LedgerProvider';
class LedgerWallet implements Wallet {
#app: FilecoinApp;
constructor(app: FilecoinApp) {
this.#app = app;
}
async addresses(): Promise<WalletAddress[]> {
const addresses = new Array<WalletAddress>();
// Load the first 10 addresses from the ledger
// This path is compatible with glif wallet addresses when using ledger
const pathBase = "m/44'/461'/0'/0";
for (let i = 0; i < 10; ++i) {
const path = `${pathBase}/${i}`;
const addrResponse = await this.#app.getAddressAndPubKey(path);
if (addrResponse.error_message !== 'No errors') {
console.error(new Error(`Error loading address at ${path}: ${addrResponse.error_message}`));
continue;
}
addresses.push({
path,
address: Address.FromString(addrResponse.addrString),
});
}
return addresses;
}
async sign(address: WalletAddress, message: Buffer): Promise<SignResponse> {
const resp = await this.#app.sign(address.path, message);
if (resp.error_message && resp.error_message !== 'No errors') {
throw new WalletError(resp.error_message, resp.return_code);
}
if (!resp.signature_compact || !resp.signature_der) {
throw new Error('missing signatures in ledger.sign response');
}
return {
returnCode: resp.return_code,
signatureCompact: resp.signature_compact,
signatureDer: resp.signature_der,
};
}
}
/**
 * React hook exposing a Wallet once the ledger transport from
 * LedgerProvider becomes available; returns null until then.
 */
export default function useWallet(): Wallet | null {
  const ledger = useLedger();
  const [wallet, setWallet] = useState<LedgerWallet | null>(null);

  useEffect(() => {
    if (ledger) {
      setWallet(new LedgerWallet(ledger));
    }
  }, [ledger]);

  return wallet;
}
| 28.15493 | 100 | 0.65933 | 3.09375 |
2c88171d88209dc08eb247431d28689775412b3b
| 2,769 |
py
|
Python
|
data_utils/read_raw.py
|
ktxlh/HMGNN
|
66299909ca02f8e61f09304d2a064fd4fc983a78
|
[
"MIT"
] | null | null | null |
data_utils/read_raw.py
|
ktxlh/HMGNN
|
66299909ca02f8e61f09304d2a064fd4fc983a78
|
[
"MIT"
] | null | null | null |
data_utils/read_raw.py
|
ktxlh/HMGNN
|
66299909ca02f8e61f09304d2a064fd4fc983a78
|
[
"MIT"
] | null | null | null |
"""
From my Transformer's data.py
"""
import pandas as pd
import json
import random
from os import listdir
from os.path import isfile, join, isdir
from tqdm import tqdm
# Binary veracity labels used by both readers below.
REAL, FAKE = 1, 0
# Fixed seed so any random sampling is reproducible across runs.
SEED = 123
random.seed(SEED)
def read_politifact_input(dataset='politifact'):
    """Read FakeNewsNet articles for `dataset` from the fixed project path.

    Returns a list of [news_id, title + " " + text, label, other_features]
    where other_features is [has_image, num_images, num_exclam, num_tweets,
    num_retweets]. Articles without a 'news content.json' are skipped.
    """
    in_dir = f'/rwproject/kdd-db/20-rayw1/FakeNewsNet/code/fakenewsnet_dataset/{dataset}'
    rumorities = {'real': REAL, 'fake': FAKE}
    inpt = []
    for rumority, label in rumorities.items():
        for news_id in tqdm(listdir(join(in_dir, rumority)), desc=f'{dataset}-{rumority}'):
            content_fn = join(in_dir, rumority, news_id, 'news content.json')
            if not isfile(content_fn): continue
            with open(content_fn, 'r') as f:
                content = json.load(f)
            # 1 if the article has a designated top image, else 0.
            has_image = int(len(content["top_img"]) > 0)
            num_images = len(content["images"])
            # Exclamation marks across title+body (simple sensationalism cue).
            num_exclam = (content["title"] + content["text"]).count("!")
            # Tweet/retweet counts are directory listings; 0 when absent.
            tp = join(in_dir, rumority, news_id, 'tweets')
            num_tweets = len(listdir(tp)) if isdir(tp) else 0
            rp = join(in_dir, rumority, news_id, 'retweets')
            num_retweets = len(listdir(rp)) if isdir(rp) else 0
            other_features = [has_image, num_images, num_exclam, num_tweets, num_retweets]
            inpt.append([news_id, content['title'] + " " + content["text"], label, other_features])
    return inpt
def read_pheme_input(in_dir = '/rwproject/kdd-db/20-rayw1/pheme-figshare'):
    """Read PHEME source tweets from every "<event>-all-rnr-threads" folder.

    Returns a list of [tweet_id, text, label, other_features] where
    other_features holds engagement/user counters (favourite and retweet
    counts, follower/status/friend/favourite counts, bio word count).
    """
    rumorities = {'non-rumours': REAL, 'rumours': FAKE}
    inpt = []
    for event_raw in listdir(in_dir):
        if event_raw[-16:] != '-all-rnr-threads': continue
        # {event}-all-rnr-threads
        event = event_raw[:-16]
        for rumority, label in rumorities.items():
            for news_id in tqdm(listdir(join(in_dir, event_raw, rumority)), desc=f'pheme-{event}-{rumority}'):
                if news_id == '.DS_Store': continue
                tweets_dir = join(in_dir, event_raw, rumority, news_id, 'source-tweets')
                for tweets_fn in listdir(tweets_dir):
                    if tweets_fn == '.DS_Store': continue
                    with open(join(tweets_dir, tweets_fn), 'r') as f:
                        tweet = json.load(f)
                    other_features = [
                        tweet["favorite_count"], tweet["retweet_count"], tweet['user']['followers_count'],
                        tweet['user']['statuses_count'], tweet['user']['friends_count'], tweet['user']['favourites_count'],
                        # Word count of the user bio; 0 when the bio is empty/None.
                        len(tweet['user']['description'].split(' ')) if tweet['user']['description'] else 0,
                    ]
                    inpt.append([tweet["id_str"], tweet['text'], label, other_features])
    return inpt
| 47.741379 | 127 | 0.587938 | 3.109375 |
2189551ffcecd7562849e5b1bc959ce0263c73c0
| 1,856 |
js
|
JavaScript
|
test/test-func/15-func-no-replication-to-self.js
|
happner/happn-cluster
|
67b5103c1047ec4e13d066d9901e4c7f945a4dc9
|
[
"MIT"
] | null | null | null |
test/test-func/15-func-no-replication-to-self.js
|
happner/happn-cluster
|
67b5103c1047ec4e13d066d9901e4c7f945a4dc9
|
[
"MIT"
] | 51 |
2016-09-24T12:38:19.000Z
|
2021-08-12T13:33:18.000Z
|
test/test-func/15-func-no-replication-to-self.js
|
happner/happn-cluster
|
67b5103c1047ec4e13d066d9901e4c7f945a4dc9
|
[
"MIT"
] | null | null | null |
var path = require("path");
var filename = path.basename(__filename);
var expect = require("expect.js");
var Promise = require("bluebird");
var HappnClient = require("happn-3").client;
var hooks = require("../lib/hooks");
// Test-sequence number is the numeric prefix of this file's name.
var testSequence = parseInt(filename.split("-")[0]);
// A single-member secure cluster: a replication loop back to the origin
// node would be directly observable as a duplicate event.
var clusterSize = 1;
var happnSecure = true;
var proxySecure = true;
// Functional test: an event published by a client must reach its local
// subscription exactly once — replication must not loop it back to self.
describe(filename, function() {
  this.timeout(30000);

  before(function() {
    // Silence logging for the suite; restored in after().
    this.logLevel = process.env.LOG_LEVEL;
    process.env.LOG_LEVEL = "off";
  });

  hooks.startCluster({
    testSequence: testSequence,
    size: clusterSize,
    happnSecure: happnSecure,
    proxySecure: proxySecure
  });

  var port;

  before(function() {
    // Discover the dynamically assigned proxy port of the single member.
    var address = this.servers[0].services.proxy.__proxyServer._server.address();
    port = address.port;
  });

  it("does not replicate to self in infinite loop", function(done) {
    var client,
      count = 0;
    HappnClient.create({
      config: {
        url: "https://127.0.0.1:" + port,
        username: "_ADMIN",
        password: "secret"
      }
    })
      .then(function(_client) {
        client = _client;
      })
      .then(function() {
        // Subscribe and count every delivery of /test/path.
        return new Promise(function(resolve, reject) {
          client.on(
            "/test/path",
            function() {
              count++;
            },
            function(e) {
              if (e) return reject(e);
              resolve();
            }
          );
        });
      })
      .then(function() {
        return client.set("/test/path", { some: "data" });
      })
      .then(function() {
        // Give any (erroneous) replication echo time to arrive.
        return Promise.delay(100);
      })
      .then(function() {
        // Exactly one delivery: no self-replication occurred.
        expect(count).to.be(1);
      })
      .then(function() {
        done();
      })
      .catch(done);
  });

  hooks.stopCluster();

  after(function() {
    process.env.LOG_LEVEL = this.logLevel;
  });
});
| 22.095238 | 81 | 0.546336 | 3.234375 |
842a7d5690d06418434b18fba8aef07b910bc124
| 5,634 |
lua
|
Lua
|
cg.lua
|
nicholas-leonard/optim
|
5906efd4b601e63d5bc6e33be8d4621354111471
|
[
"BSD-3-Clause"
] | 199 |
2015-01-21T19:27:56.000Z
|
2021-10-02T16:50:30.000Z
|
cg.lua
|
nicholas-leonard/optim
|
5906efd4b601e63d5bc6e33be8d4621354111471
|
[
"BSD-3-Clause"
] | 108 |
2015-01-07T20:57:28.000Z
|
2019-07-27T00:49:49.000Z
|
cg.lua
|
nicholas-leonard/optim
|
5906efd4b601e63d5bc6e33be8d4621354111471
|
[
"BSD-3-Clause"
] | 207 |
2015-01-05T18:26:57.000Z
|
2022-03-23T16:29:58.000Z
|
--[[
This cg implementation is a rewrite of minimize.m written by Carl
E. Rasmussen. It is supposed to produce exactly the same results (give
or take numerical accuracy due to some changed order of
operations). You can compare the result on rosenbrock with minimize.m.
http://www.gatsby.ucl.ac.uk/~edward/code/minimize/example.html
[x fx c] = minimize([0 0]', 'rosenbrock', -25)
Note that we limit the number of function evaluations only, it seems much
more important in practical use.
ARGS:
- `opfunc` : a function that takes a single input, the point of evaluation.
- `x` : the initial point
- `state` : a table of parameters and temporary allocations.
- `state.maxEval` : max number of function evaluations
- `state.maxIter` : max number of iterations
- `state.df[0,1,2,3]` : if you pass torch.Tensor they will be used for temp storage
- `state.[s,x0]` : if you pass torch.Tensor they will be used for temp storage
RETURN:
- `x*` : the new x vector, at the optimal point
- `f` : a table of all function values where
`f[1]` is the value of the function before any optimization and
`f[#f]` is the final fully optimized value, at x*
(Koray Kavukcuoglu, 2012)
--]]
function optim.cg(opfunc, x, config, state)
   -- parameters
   local config = config or {}
   local state = state or config
   local rho = config.rho or 0.01          -- Wolfe sufficient-decrease constant
   local sig = config.sig or 0.5           -- Wolfe curvature constant
   local int = config.int or 0.1           -- interpolation bracket limit
   local ext = config.ext or 3.0           -- extrapolation limit
   local maxIter = config.maxIter or 20    -- max line-search steps per iteration
   local ratio = config.ratio or 100       -- cap on slope ratio for step-size update
   local maxEval = config.maxEval or maxIter*1.25
   local red = 1
   local verbose = config.verbose or 0

   local i = 0                             -- function-evaluation counter (the real budget)
   local ls_failed = 0                     -- flag: previous line search failed
   local fx  = {}

   -- we need three points for the interpolation/extrapolation stuff
   local z1,z2,z3 = 0,0,0                  -- step sizes
   local d1,d2,d3 = 0,0,0                  -- directional derivatives (slopes)
   local f1,f2,f3 = 0,0,0                  -- function values

   -- gradient buffers, reused across iterations (taken from state if given)
   local df1 = state.df1 or x.new()
   local df2 = state.df2 or x.new()
   local df3 = state.df3 or x.new()
   local tdf

   df1:resizeAs(x)
   df2:resizeAs(x)
   df3:resizeAs(x)

   -- search direction
   local s = state.s or x.new()
   s:resizeAs(x)

   -- we need a temp storage for X
   local x0 = state.x0 or x.new()
   local f0 = 0
   local df0 = state.df0 or x.new()
   x0:resizeAs(x)
   df0:resizeAs(x)

   -- evaluate at initial point
   f1,tdf = opfunc(x)
   fx[#fx+1] = f1
   df1:copy(tdf)
   i=i+1

   -- initial search direction: steepest descent
   s:copy(df1):mul(-1)

   d1 = -s:dot(s )         -- slope
   z1 = red/(1-d1)         -- initial step

   while i < math.abs(maxEval) do
      -- save the starting point so a failed line search can restore it
      x0:copy(x)
      f0 = f1
      df0:copy(df1)

      -- take a step of size z1 along s and evaluate
      x:add(z1,s)
      f2,tdf = opfunc(x)
      df2:copy(tdf)
      i=i+1
      d2 = df2:dot(s)
      f3,d3,z3 = f1,d1,-z1   -- init point 3 equal to point 1
      local m = math.min(maxIter,maxEval-i)
      local success = 0
      local limit = -1

      -- line search: bracket then interpolate until the Wolfe conditions hold
      while true do
         -- shrink the bracket while the Wolfe conditions are violated
         while (f2 > f1+z1*rho*d1 or d2 > -sig*d1) and m > 0 do
            limit = z1
            if f2 > f1 then
               -- quadratic fit
               z2 = z3 - (0.5*d3*z3*z3)/(d3*z3+f2-f3)
            else
               -- cubic fit
               local A = 6*(f2-f3)/z3+3*(d2+d3)
               local B = 3*(f3-f2)-z3*(d3+2*d2)
               z2 = (math.sqrt(B*B-A*d2*z3*z3)-B)/A
            end
            -- NaN/Inf guard: fall back to bisection
            if z2 ~= z2 or z2 == math.huge or z2 == -math.huge then
               z2 = z3/2;
            end
            -- keep the new point within the admissible bracket
            z2 = math.max(math.min(z2, int*z3),(1-int)*z3);
            z1 = z1 + z2;
            x:add(z2,s)
            f2,tdf = opfunc(x)
            df2:copy(tdf)
            i=i+1
            m = m - 1
            d2 = df2:dot(s)
            z3 = z3-z2;
         end
         if f2 > f1+z1*rho*d1 or d2 > -sig*d1 then
            break        -- line-search failure
         elseif d2 > sig*d1 then
            success = 1;  -- both Wolfe conditions satisfied
            break;
         elseif m == 0 then
            break;        -- out of line-search budget
         end
         -- extrapolate with a cubic fit
         local A = 6*(f2-f3)/z3+3*(d2+d3);
         local B = 3*(f3-f2)-z3*(d3+2*d2);
         z2 = -d2*z3*z3/(B+math.sqrt(B*B-A*d2*z3*z3))
         -- guard against bad extrapolations (NaN/Inf/negative) and clamp
         -- the step to the allowed extrapolation/interpolation ranges
         if z2 ~= z2 or z2 == math.huge or z2 == -math.huge or z2 < 0 then
            if limit < -0.5 then
               z2 = z1 * (ext -1)
            else
               z2 = (limit-z1)/2
            end
         elseif (limit > -0.5) and (z2+z1) > limit then
            z2 = (limit-z1)/2
         elseif limit < -0.5 and (z2+z1) > z1*ext then
            z2 = z1*(ext-1)
         elseif z2 < -z3*int then
            z2 = -z3*int
         elseif limit > -0.5 and z2 < (limit-z1)*(1-int) then
            z2 = (limit-z1)*(1-int)
         end
         f3=f2; d3=d2; z3=-z2;
         z1 = z1+z2;
         x:add(z2,s)
         f2,tdf = opfunc(x)
         df2:copy(tdf)
         i=i+1
         m = m - 1
         d2 = df2:dot(s)
      end
      if success == 1 then
         -- line search succeeded: accept the point and update the search
         -- direction with the Polak-Ribiere coefficient
         f1 = f2
         fx[#fx+1] = f1;
         local ss = (df2:dot(df2)-df2:dot(df1)) / df1:dot(df1)
         s:mul(ss)
         s:add(-1,df2)
         local tmp = df1:clone()
         df1:copy(df2)
         df2:copy(tmp)
         d2 = df1:dot(s)
         -- if the new direction is not a descent direction, restart with
         -- steepest descent
         if d2> 0 then
            s:copy(df1)
            s:mul(-1)
            d2 = -s:dot(s)
         end
         z1 = z1 * math.min(ratio, d1/(d2-1e-320))
         d1 = d2
         ls_failed = 0
      else
         -- line search failed: restore the pre-step point
         x:copy(x0)
         f1 = f0
         df1:copy(df0)
         -- two failures in a row (or budget exhausted): give up
         if ls_failed or i>maxEval then
            break
         end
         local tmp = df1:clone()
         df1:copy(df2)
         df2:copy(tmp)
         -- restart from steepest descent
         s:copy(df1)
         s:mul(-1)
         d1 = -s:dot(s)
         z1 = 1/(1-d1)
         ls_failed = 1
      end
   end
   -- persist the buffers for the next call
   state.df0 = df0
   state.df1 = df1
   state.df2 = df2
   state.df3 = df3
   state.x0 = x0
   state.s = s
   return x,fx,i
end
| 26.956938 | 83 | 0.518814 | 3.21875 |
ef30f0eb8bc9146de87e6daeb25858435abd1a09
| 1,620 |
h
|
C
|
include/Parser.h
|
katm10/HalideCodegen
|
b80856b56d0fc91cc16a047636aca1ce29763175
|
[
"MIT"
] | null | null | null |
include/Parser.h
|
katm10/HalideCodegen
|
b80856b56d0fc91cc16a047636aca1ce29763175
|
[
"MIT"
] | null | null | null |
include/Parser.h
|
katm10/HalideCodegen
|
b80856b56d0fc91cc16a047636aca1ce29763175
|
[
"MIT"
] | null | null | null |
#ifndef TRS_CODEGEN_PARSER_H
#define TRS_CODEGEN_PARSER_H

#include <string>

#include "ast/Types.h"
#include "Rule.h"

// Helper routines for writing a parser and routines for parsing
// Halide rewrite rules. The cursor-based helpers advance *cursor on success
// and never move it past `end`.

// Print an error and the remaining chars to be parsed, then abort.
void report_error(const char **cursor, const char *debug_info);

// Move the input cursor past any whitespace, but not beyond the end
// pointer.
void consume_whitespace(const char **cursor, const char *end);

// If the input cursor starts with the expected string, update it to
// point to the end of the string and return true. Otherwise, return
// false and don't modify the input cursor.
bool consume(const char **cursor, const char *end, const char *expected);

// Calls consume and asserts that it succeeded.
void expect(const char **cursor, const char *end, const char *pattern);

// Returns if the input cursor starts with the expected string.
// Will not move the cursor regardless of the result.
bool check(const char **cursor, const char *end, const char *pattern);

// Consume and return a legal Halide identifier.
std::string consume_token(const char **cursor, const char *end);

// Consume and return a legal Halide variable identifier.
std::string consume_name(const char **cursor, const char *end);

// Consume and return an operator token.
std::string consume_op(const char **cursor, const char *end);

// Consume and return a constant integer.
int64_t consume_int(const char **cursor, const char *end);

// Parse a list of Halide rewrite rules from `filename`.
// NOTE(review): raw Rule* return suggests caller-owned heap objects — confirm.
std::vector<Rule *> parse_rules_from_file(const std::string &filename);

#endif
| 36 | 73 | 0.755556 | 3.203125 |
6603ff7fccb20d8aaab151e581ac7fb145e00178
| 5,056 |
py
|
Python
|
live_class/live_class.py
|
skushagra/SQL
|
f510e4eaa1bedb919eae1509bc6335301460821e
|
[
"MIT"
] | null | null | null |
live_class/live_class.py
|
skushagra/SQL
|
f510e4eaa1bedb919eae1509bc6335301460821e
|
[
"MIT"
] | null | null | null |
live_class/live_class.py
|
skushagra/SQL
|
f510e4eaa1bedb919eae1509bc6335301460821e
|
[
"MIT"
] | null | null | null |
import datetime
import random
from tabulate import tabulate as tba
from mysql.connector.utils import intstore
import mysql.connector
from sys import exit
from sendmail import sendmail, sendpass

# Connect to the local MySQL instance holding the project database.
mydb = mysql.connector.connect(
    host="localhost",
    user="root",
    password="root"
)
c = mydb.cursor()
c.execute('use sql_projects')

# Look up the admin e-mail registered for this app and send a one-time password.
c.execute('select email from admin where app="live_class" or app="live class"')
data = c.fetchall()
data = data[0][0]
# Bug fix: `randint` was called unqualified but only `import random` is in
# scope, so this line raised NameError before any OTP could be sent.
pastoch = random.randint(111111, 999999)
sendpass(data, pastoch)
pas = input('OTP : ')
# Bug fix: input() returns a string while pastoch is an int, so the original
# `pas != pastoch` was always True and every login was rejected.
if pas != str(pastoch):
    print('Incorrect Password.')
    exit()
print('\nWelcome to Live Class Management.\nKushagraS Version[0.1] \n(c) KushagraS. All Rights Reserved.\n')
# Interactive command loop; exits via the 'exit' command below.
while True:
    print('Select an appropriate command : \n\t1. Create new class\n\t2. Retrive old class data.\n\t3. Stop attendance for a class\n')
    ch = input('Your choice : ')
    if ch=='1':
        # --- Create a new class row and a per-class attendance table ---
        print('Creating new class : \n')
        class_name = input('\tClass name : ')
        date = str(datetime.date.today())
        description = input('\tDescribe the class : ')
        instructor = input('\tInstructor : ')
        start = input('\tStart time (in 24 hour format) : ')
        end = input('\tEnd time (in 24 hour format) : ')
        # NOTE(review): treats clock times as plain floats, so the duration
        # is wrong across hour boundaries (e.g. 14.50 -> 15.10) — confirm.
        duration = float(end)-float(start)
        otp = random.randint(111111, 999999)
        uar = ''
        MAX_LIMIT = 122
        # Build a 10-char random table name from char codes 97..122 (a-z).
        for _ in range(10):
            random_integer = random.randint(97, MAX_LIMIT)
            # Keep appending random characters using chr(x)
            uar += (chr(random_integer))
        print('\nNew Class Information\n')
        lsa = list()
        lsa.append(class_name)
        lsa.append(date)
        lsa.append(description)
        lsa.append(instructor)
        lsa.append(start)
        lsa.append(end)
        lsa.append(otp)
        lsa.append(duration*100)
        lsa.append(uar)
        ls = list()
        ls.append(lsa)
        print(tba(ls, headers=["Class Name", "Date", "Description", "Isntructor", "Start Time", "End Time", "OTP", "Duration(min)", "Unique Attendance Register"]), '\n\n')
        # Drop the display-only duration column before inserting the row.
        # NOTE(review): list.remove() deletes by value — fragile if another
        # column ever equals duration*100.
        lsa.remove(duration*100)
        print('\nShare the OTP and Unique Attendance Register to mark attendance.\n')
        cmd = 'INSERT INTO live_class(class_name, class_date, description, instructor, start_time, end_time, otp, uar) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)'
        c.execute(cmd, lsa)
        mydb.commit()
        print('Class created successfully.')
        c.execute('select class_id from live_class where class_date="'+date+'" and otp='+str(otp))
        id = c.fetchall()
        id = id[0][0]
        # NOTE(review): several statements in this script interpolate user
        # input directly into SQL (unparameterised) — injection risk.
        cmd = 'CREATE TABLE '+uar+'(email varchar(50) not null, join_time varchar(20) not null, leave_time varchar(20) not null, poa varchar(1) not null ,PRIMARY KEY ( email ));'
        c.execute(cmd)
        print('\nAttendance table',uar,'created successfully.\n')
        print('Student email will be added to table : '+uar+'\n')
        mydb.commit()
    if ch=='2':
        # --- List all classes, then dump one class's attendance table ---
        c.execute('select * from live_class;')
        data = c.fetchall()
        print(tba(data, headers=["Class Name", "Date", "Description", "Isntructor", "Start Time", "End Time", "OTP", "Class ID", "Unique Attendance Register"]))
        print('\n\n')
        cho = input('Enter class ID : ')
        cmd = 'select uar from live_class where class_id='+cho
        c.execute(cmd)
        data = c.fetchall()
        if data ==[] or data=='':
            print('Class not found.')
        else:
            data = data[0][0]
            cmd = 'select * from '+data
            c.execute(cmd)
            data = c.fetchall()
            if data == [] or data == '':
                print('No attendance has been marked for this class.\n')
            else:
                print('\n'+tba(data, headers=["Email", "Join Time", "Leave Time", "P || A"]))
    if ch=='3':
        # --- Rotate a class's OTP so no further attendance can be marked ---
        c.execute('select * from live_class;')
        data = c.fetchall()
        print(tba(data, headers=["Class Name", "Date", "Description", "Isntructor", "Start Time", "End Time", "OTP", "Duration(min)", "Unique Attendance Register"]))
        print('\n\n')
        cho = input('Enter class ID : ')
        notp = random.randint(111111,999999)
        cmd = 'update live_class set OTP='+str(notp)+' where class_id='+cho
        c.execute(cmd)
        mydb.commit()
        print('OTP has been changend. Attendance will not not be marked using old OTP.\n')
    if ch=='exit':
        mydb.commit()
        mydb.close()
        exit()
    if ch=='sql -p -f':
        # --- Raw SQL shell for admins; commits when 'exit' is typed ---
        print('\nKushagraS Live Class Full Power SQL Command Line\n(c) KushagraS. All Rights Reserved.\nChanges will not be comitted until you exit.\n\n')
        while True:
            cmd = input('kssql>')
            if cmd=='exit':
                mydb.commit()
                break
            c.execute(cmd)
            data = c.fetchall()
            print(tba(data))
| 38.30303 | 179 | 0.567049 | 3.203125 |
e23a554f5e83319778d34517cfa6357b63d29706
| 5,927 |
py
|
Python
|
algos/deterministic/JP_coloring.py
|
HekpoMaH/algorithmic-concepts-reasoning
|
17c87faad2fbe8481455de34a145a4753a2fe4d0
|
[
"Apache-2.0"
] | 16 |
2021-07-15T21:23:38.000Z
|
2022-02-08T11:19:58.000Z
|
algos/deterministic/JP_coloring.py
|
HekpoMaH/algorithmic-concepts-reasoning
|
17c87faad2fbe8481455de34a145a4753a2fe4d0
|
[
"Apache-2.0"
] | null | null | null |
algos/deterministic/JP_coloring.py
|
HekpoMaH/algorithmic-concepts-reasoning
|
17c87faad2fbe8481455de34a145a4753a2fe4d0
|
[
"Apache-2.0"
] | 1 |
2021-07-22T09:32:30.000Z
|
2021-07-22T09:32:30.000Z
|
import torch
import torch_geometric
from torch_geometric.data import Data
import torch_scatter
import seaborn as sns
import networkx as nx
import matplotlib
import matplotlib.pyplot as plt
from algos.layers.encoders import integer2bit
from algos.hyperparameters import get_hyperparameters
_POS = None  # cached spring layout so successive frames keep node positions stable
def _draw_pt_graph(G, priority, colors=None):
    """Render a torch_geometric graph with per-node ``(index, priority)`` labels.

    Parameters
    ----------
    G : torch_geometric graph; converted to networkx for drawing.
    priority : per-node priority tensor (only ``.tolist()`` is used here).
    colors : optional per-node values mapped through the "Paired" colormap.
    """
    G = torch_geometric.utils.to_networkx(G)
    palette = [sns.color_palette("Paired").as_hex()[i] for i in [0, 1, 2, 3, 4, 5]]
    cmap = matplotlib.colors.ListedColormap(palette)
    global _POS
    # Compute the layout once and reuse it, so repeated calls draw nodes
    # in stable positions.
    pos = nx.spring_layout(G, k=.15, iterations=10) if _POS is None else _POS
    if _POS is None:
        _POS = dict(pos.items())
    num_nodes = len(G.nodes)
    plt.figure(figsize=(15, 15))
    nx.draw_networkx_nodes(G,
                           pos,
                           # BUGFIX: the keyword is 'nodelist', not 'node_list';
                           # the misspelled kwarg was not a valid parameter.
                           # 'with_labels' is not a draw_networkx_nodes parameter
                           # either — labels are drawn explicitly below via
                           # draw_networkx_labels.
                           nodelist=G.nodes(),
                           node_color=colors,
                           cmap=cmap,
                           node_size=1200,
                           alpha=0.9,
                           )
    nx.draw_networkx_edges(G, pos)
    nx.draw_networkx_labels(G, pos,
                            dict(zip(range(num_nodes), zip(range(num_nodes), priority.tolist()))), font_size=8)
    plt.axis('off')
    plt.draw()
def jones_plassmann(G, num_colors=5):
    '''
    The below algorithm is based on:
    M.T.Jones and, P.E.Plassmann, A Parallel Graph Coloring Heuristic, SIAM, Journal of Scienti c Computing 14 (1993) 654
    The algorithm takes a graph (a networkx class), randomly assigns a priority
    to each node and colours according to the above paper. In a nutshell:
    - assume there is a colour order, (e.g. color 1 < color 2 < ... < color |C|,
      where |C|=number of colours)
    - every node checks if it is uncoloured and has the highest priority
    - if it has these two properties then it colours itself in the lowest
      colour (according to the ordering) not seen in the neighbourhood.
    - the only differences to J.P. algorithm is that we assume a fixed
      number of colours (e.g. 5) as NN are known not to be able to 'count'
      (ref needed) and we resample the priorities whenever there is a clash
      (two nodes with the same priority) or we need more colours than needed
    '''
    # we have |C|+1 output classes, 1 for uncolored node and |C|=5 for each colour
    # Concept encoding
    # f0 = colored or not
    # f1 = largest uncolored in neighb. or not (i.e. has highest priority in neighb.)
    # f2-f6 = colors seen around 1 to 5
    # f7-f11 = has been colored in 1 to 5
    # E.g. for having colour 3 on the next iteration an ideal explanation is:
    # (f0 & f9) | (~f0 & f1 & f2 & f3 & ~f4)
    num_nodes = len(G.nodes)
    # Attach a random integer priority in [0, 255) to every node as
    # attribute 'x', then convert to a torch_geometric graph.
    priority = list({'x': x} for x in torch.randint(0, 255, (num_nodes,)).tolist())
    priority = dict(zip(range(num_nodes), priority))
    nx.set_node_attributes(G, priority)
    G = torch_geometric.utils.from_networkx(G)
    # colored[v] == 0 means "uncoloured"; otherwise it holds colour 1..num_colors.
    colored = torch.zeros(num_nodes, dtype=torch.long)
    # Per-step training targets collected over the rollout.
    all_inp, all_target_col, all_target_concepts, all_term = [], [], [], []
    all_target_concepts_fin = []
    # Edge endpoints: messages flow from n2 (source) to n1 (destination).
    n1, n2 = G.edge_index.tolist()
    # Binary encoding of each node's priority, used as part of the input.
    inp_priority = integer2bit(G.x)
    last_concepts_real = None
    # num_nodes+1 rounds is enough: at least one node is coloured per round.
    for _ in range(num_nodes+1):
        # Input at this step: one-hot of current colour + priority bits.
        c1h = torch.nn.functional.one_hot(colored, num_classes=num_colors+1)
        all_inp.append(torch.cat((c1h, inp_priority), dim=-1).clone())
        concepts = torch.zeros((num_nodes, get_hyperparameters()['dim_concept_parallel_coloring']), dtype=torch.int)
        # f0: node already coloured.
        concepts[colored != 0, 0] = 1
        # Coloured nodes drop out of the priority competition (priority -1).
        priority = G.x.clone()
        priority[colored != 0] = -1
        edge_priority = priority[n2]
        # Each node receives the maximum priority among its uncoloured neighbours.
        received_priority = torch.full((num_nodes,), -1)
        torch_scatter.scatter(edge_priority.long(), torch.tensor(n1, dtype=torch.long), reduce='max', out=received_priority)
        # Two adjacent uncoloured nodes with equal priority: abort so the
        # caller can resample priorities.
        if ((received_priority == priority) & (priority != -1)).any():
            print("REDO: clashing priorities")
            return None
        # f1: uncoloured and strictly the highest priority in its neighbourhood.
        to_be_colored = (colored == 0) & (received_priority < priority)
        concepts[to_be_colored, 1] = 1
        # colors_around[v, i] == True iff some neighbour of v has colour i+1.
        colors_around = torch.zeros(num_nodes, num_colors, dtype=torch.bool)
        for i in range(num_colors):
            colors_sent = colored[n2] == i+1
            rec_color_i = torch.full((num_nodes,), -1)
            torch_scatter.scatter(colors_sent.long(), torch.tensor(n1, dtype=torch.long), reduce='max', out=rec_color_i)
            colors_around[rec_color_i != -1, i] = rec_color_i[rec_color_i != -1].bool()
        # Lowest colour index not seen in the neighbourhood (min over bools
        # returns the first False position).
        colors_to_receive = colors_around.int().min(dim=-1).indices+1
        # Some node sees all num_colors colours around it: abort, not enough colours.
        if colors_around.all(dim=-1).any():
            print("REDO: colors not enough")
            return None
        colored = torch.where(to_be_colored, colors_to_receive, colored)
        # f2-f6: colours seen around.
        concepts[:, 2:7] = colors_around
        # Graph-level concept summary: min over all nodes.
        concepts_fin = concepts.min(dim=0).values.unsqueeze(0)
        all_target_concepts.append(concepts.clone())
        all_target_concepts_fin.append(concepts_fin.clone())
        all_target_col.append(colored.clone())
        # Termination target: True while any node remains uncoloured.
        all_term.append((colored == 0).any().unsqueeze(-1))
        # Remember the concept snapshot at the first fully-coloured step.
        if not (colored == 0).any() and last_concepts_real is None:
            last_concepts_real = concepts.clone()
    # Shift graph-level concepts one step earlier; duplicate the final one.
    all_target_concepts_fin = [all_target_concepts_fin[i + 1]
                               for i in range(0, len(all_target_concepts_fin)-1)] + [all_target_concepts_fin[-1]]
    data = Data(torch.stack(all_inp, dim=1),
                edge_index=torch.tensor(G.edge_index),
                y=torch.stack(all_target_col, dim=1),
                concepts=torch.stack(all_target_concepts, dim=1),
                last_concepts_real=last_concepts_real,
                concepts_fin=torch.stack(all_target_concepts_fin, dim=1),
                priorities=inp_priority,
                termination=torch.stack(all_term, dim=1))
    return data
| 44.901515 | 125 | 0.634554 | 3.046875 |
b00b8a141a8d8ee61fe250f486961401a05c069d
| 4,276 |
py
|
Python
|
korbinian/prot_list/uniprot_retrieve.py
|
teese/korbinian
|
3715b40830957f04c4f44b01025449bc6b6a936e
|
[
"MIT"
] | 3 |
2018-03-08T12:03:50.000Z
|
2018-04-09T12:44:39.000Z
|
korbinian/prot_list/uniprot_retrieve.py
|
teese/korbinian
|
3715b40830957f04c4f44b01025449bc6b6a936e
|
[
"MIT"
] | 2 |
2018-06-06T09:51:11.000Z
|
2018-08-06T15:56:41.000Z
|
korbinian/prot_list/uniprot_retrieve.py
|
teese/korbinian
|
3715b40830957f04c4f44b01025449bc6b6a936e
|
[
"MIT"
] | null | null | null |
import os
from Bio import SeqIO
import pandas as pd
import sys
# import debugging tools
from korbinian.utils import pr, pc, pn, aaa
def parse_large_flatfile_with_list_uniprot_accessions(s, input_accession_list, uniprot_dir, logging, selected_uniprot_records_flatfile):
    """Retrieves UniProt flatfiles from a large flatfile (e.g. All UniProt), based on a list of accession numbers.

    Parameters
    ----------
    s : dict
        Settings dictionary; only s["list_number"] is used here, to locate
        the large flatfile belonging to this protein list.
    input_accession_list : str
        Path to a text file with one UniProt accession per line.
        (The previous docstring called this a list; the code opens it as a file.)
    uniprot_dir : str
        Path to UniProt folder containing the large flatfile.
    logging : logging.Logger
        Logger for printing to console and logfile.
    selected_uniprot_records_flatfile : str
        Path to UniProt flatfile containing selected records for analysis. In this case, the output file.
    """
    logging.info('~~~~~~~~~~~~ starting A01_parse_large_flatfile_with_list_uniprot_accessions ~~~~~~~~~~~~')
    # Path to the large uniprot flatfile containing the protein records to be extracted.
    input_uniprot_flatfile = os.path.join(uniprot_dir, "List%02d_large_uniprot_flatfile.txt" % s["list_number"])
    output_uniprot_flatfile = selected_uniprot_records_flatfile
    # Accessions of the proteins to be selected from the larger uniprot file.
    # BUGFIX: use a context manager so the list file handle is closed
    # (it was previously opened inside the comprehension and leaked).
    with open(input_accession_list, "r") as accession_file:
        accession_list = [line.strip() for line in accession_file]
    # Random-access index over the large flatfile, keyed by accession.
    uniprot_index_handle = SeqIO.index(input_uniprot_flatfile, "swiss")
    with open(output_uniprot_flatfile, "wb") as output:
        for acc in accession_list:
            try:
                # get_raw adds the selected record to the file, but adds a new line after
                # each line! Doesn't affect later conversion to SeqRecord object.
                output.write(uniprot_index_handle.get_raw(acc))
            except KeyError:
                logging.info("No SwissProt record found in %s for %s." % (input_uniprot_flatfile, acc))
def retrieve_uniprot_data_for_acc_list_in_xlsx_file(excelfile_with_uniprot_accessions, input_uniprot_flatfile, selected_uniprot_records_flatfile, logging):
    """ From a list of uniprot accessions in excel, select out desired records from a large UniProt flatfile.

    Parameters
    ----------
    excelfile_with_uniprot_accessions : str
        Path to excel input file. Must contain a 'uniprot_numbers' sheet with
        'uniprot_acc' and 'include_in_analysis' columns.
    input_uniprot_flatfile : str
        Path to the large UniProt flatfile records are extracted from.
    selected_uniprot_records_flatfile : str
        Path to output UniProt flatfile containing selected records for analysis.
    logging : logging.Logger
        Logger for printing to console and logfile.
    """
    logging.info('~~~~~~~~~~~~ starting retrieve_uniprot_data_for_acc_list_in_xlsx_file ~~~~~~~~~~~~')
    # take list of acc, search in default uniprot flatfile. If missing, download from uniprot server.
    # NOTE(review): 'sheetname' was renamed to 'sheet_name' in pandas 0.21 and
    # removed in 0.25 — confirm the pinned pandas version before upgrading.
    df_uniprot_accessions = pd.read_excel(excelfile_with_uniprot_accessions, sheetname='uniprot_numbers')
    # remove proteins that are marked as 'not included in analysis'
    df_uniprot_accessions = df_uniprot_accessions[df_uniprot_accessions['include_in_analysis'] == True]
    # Random-access index over the large flatfile, keyed by accession.
    # (The previous always-true `assert isinstance(x, object)` was removed.)
    uniprot_index_handle = SeqIO.index(input_uniprot_flatfile, "swiss")
    with open(selected_uniprot_records_flatfile, "wb") as output:
        for uniprot_accession in df_uniprot_accessions['uniprot_acc']:
            try:
                # this adds the selected records to the file, but adds a new line after each line!
                # Doesn't affect conversion to SeqRecord object)
                output.write(uniprot_index_handle.get_raw(uniprot_accession))
            except KeyError:
                # BUGFIX: terminate the message with a newline so consecutive
                # warnings do not run together on stdout.
                sys.stdout.write("No SwissProt record found in %s for %s.\n" % (input_uniprot_flatfile, uniprot_accession))
| 57.783784 | 155 | 0.735033 | 3.21875 |
ea558017988cb42ea54605bd1279ac43c1ef12f3
| 5,713 |
lua
|
Lua
|
App/Base/EventDay/init.lua
|
qwreey75/qwreey.roblox.plugins
|
7b6552a065a033f375b85cb11eb273ca86fb8a49
|
[
"MIT"
] | 5 |
2021-02-08T12:10:36.000Z
|
2021-02-08T12:10:42.000Z
|
App/Base/EventDay/init.lua
|
qwreey75/qwreey.roblox.Plugins
|
7b6552a065a033f375b85cb11eb273ca86fb8a49
|
[
"MIT"
] | 1 |
2021-03-14T14:05:19.000Z
|
2021-03-20T04:01:28.000Z
|
App/Base/EventDay/init.lua
|
qwreey75/qwreey.roblox.plugins
|
7b6552a065a033f375b85cb11eb273ca86fb8a49
|
[
"MIT"
] | null | null | null |
local module = {}
-- Returns true when Num lies in the closed interval bounded by x and y,
-- regardless of which bound is larger.
local function CheckBetween(Num,x,y)
	local lower = math.min(x, y)
	local upper = math.max(x, y)
	return Num >= lower and Num <= upper
end
-- Saturated palette for firework particles; one colour is picked at
-- random per rocket (see UIFireworks below).
local FireworkColors = {
	Color3.fromRGB(255,0,0);
	Color3.fromRGB(0,255,0);
	Color3.fromRGB(0,0,255);
	Color3.fromRGB(255,255,0);
	Color3.fromRGB(0,255,255);
	Color3.fromRGB(255,0,255);
	Color3.fromRGB(255,255,255);
}
-- Launches a single UI firework inside Parent: a rocket sprite flies up
-- from the bottom edge at horizontal scale position ScalePosX, then bursts
-- into 12–16 coloured fragments that arc away under simulated gravity and
-- fade out. AdvancedTween animates positions/transparency; ParticleHandle
-- provides the 2D particle physics.
local function UIFireworks(Parent,ScalePosX,AdvancedTween,ParticleHandle)
	-- number of fragments this rocket bursts into
	local ExploTo = math.random(12,16)
	local Color = FireworkColors[math.random(1,#FireworkColors)]
	-- climb height (px) and sideways launch offset of the rocket
	local OffsetPosY = math.random(100,200)
	local OffsetPosX = math.random(-75,75)
	local FlyStart = UDim2.new(ScalePosX,OffsetPosX,1,-1)
	local FlyEnd = UDim2.new(ScalePosX,0,1,-OffsetPosY)
	-- tilt the rocket sprite along its flight direction
	local FlyAng = math.deg(math.atan2(OffsetPosY,OffsetPosX))
	local FlyPoint = script.FireworkFly:Clone()
	FlyPoint.Rotation = FlyAng
	FlyPoint.Position = FlyStart
	FlyPoint.Parent = Parent
	FlyPoint.ImageColor3 = Color
	FlyPoint.Point.ImageColor3 = Color
	-- Fly up
	AdvancedTween:RunTween(FlyPoint,{
		Time = 0.6;
		Easing = AdvancedTween.EasingFunctions.Exp4;
		Direction = AdvancedTween.EasingDirection.Out;
	},{
		Position = FlyEnd;
	},function()
		-- Burst into several fragments
		for i = 1,ExploTo do
			-- Launch one fragment
			local FireworkFire = script.FireworkFire:Clone()
			FireworkFire.ImageColor3 = Color
			FireworkFire.Position = FlyEnd
			FireworkFire.Parent = Parent
			-- Pick the fragment's flight angle
			local FireAng = math.random(0,360)
			FireworkFire.Rotation = FireAng + 270
			-- Physics simulation for the fragment's trajectory
			local Physics = ParticleHandle:Craft_2DParticleEmitter({
				OnUDim = true;
				Inertia = 0.01;
				Gravity = 0.09;
				Vector = ParticleHandle:GetVecByYLine(FireAng,3.7);
				Position = Vector2.new(0,-38);
				Function = function(Pos,Vec)
					FireworkFire.Position = UDim2.new(
						ScalePosX,
						Pos.X,
						1,
						-OffsetPosY + Pos.Y
					)
					-- keep the sprite pointing along its velocity
					FireworkFire.Rotation = math.deg(math.atan2(Vec.Y,Vec.X)) + 180
				end;
			})
			-- Fade out and destroy the fragment
			delay(0.25,function()
				AdvancedTween:RunTween(FireworkFire,{
					Time = 0.25;
					Easing = AdvancedTween.EasingFunctions.Linear;
					Direction = AdvancedTween.EasingDirection.Out;
				},{
					ImageTransparency = 1;
				},function()
					Physics:Destroy()
					FireworkFire:Destroy()
				end)
			end)
		end
		-- Remove the firework head
		AdvancedTween:RunTweens({FlyPoint,FlyPoint.Point},{
			Time = 0.2;
			Easing = AdvancedTween.EasingFunctions.Linear;
			Direction = AdvancedTween.EasingDirection.Out;
		},{
			ImageTransparency = 1;
		},function()
			FlyPoint:Destroy()
		end)
	end)
end
-- Registry of seasonal events. Each entry provides:
--   Name     – identifier for the event
--   Check    – predicate (locale string, os.date("*t") table) -> bool
--   RunEvent – effect hook receiving the plugin UI and shared modules
local Events = {
	{
		Name = "Christmas";
		Check = function(Lang,Date)
			-- Active on 25–26 December for every locale.
			if Date.month ~= 12 then
				return false
			elseif not CheckBetween(Date.day,25,26) then
				return false
			end
			return true
		end;
		RunEvent = function(ui,Modules)
			local ParticleHandle = Modules.ParticleHandle
			local MaterialUI = Modules.MaterialUI
			local AdvancedTween = Modules.AdvancedTween
			-- Snowflake sprite variants.
			local snows = {
				"http://www.roblox.com/asset/?id=6130714772";
				"http://www.roblox.com/asset/?id=6130714752";
				"http://www.roblox.com/asset/?id=6130714736";
				"http://www.roblox.com/asset/?id=6130714725";
			}
			local Focused = false
			local Running = false
			ui.WindowFocused:Connect(function()
				Focused = true
				if Running then
					return
				end
				-- BUGFIX: Running was never set to true, so the guard above
				-- was dead and every focus event spawned another snow loop.
				Running = true
				while true do
					if not Focused then
						break
					end
					-- Spawn one snowflake at a random horizontal position.
					local PosX = math.random(0,100)/100
					local this = MaterialUI.Create("ImageLabel",{
						AnchorPoint = Vector2.new(0,.51);
						Position = UDim2.new(PosX,0,0,0);
						Size = UDim2.fromOffset(25,25);
						BackgroundTransparency = 1;
						Image = snows[math.random(1,#snows)];
						ZIndex = 2147483647;
						Parent = ui;
						ImageTransparency = 0.7;
					})
					-- Let it drift straight down under the particle physics.
					local Physics = ParticleHandle:Craft_2DParticleEmitter({
						OnUDim = true;
						Inertia = 1;
						Gravity = 0;
						Vector = ParticleHandle:GetVecByYLine(180,3.2);
						Position = Vector2.new(0,0);
						Function = function(Pos)
							this.Position = UDim2.new(PosX,0,0,Pos.Y)
						end;
					})
					-- Clean up the flake after 3 seconds.
					delay(3,function()
						if this then
							Physics:Destroy()
							this:Destroy()
						end
					end)
					wait(0.2)
				end
				Running = false
			end)
			ui.WindowFocusReleased:Connect(function()
				Focused = false
			end)
		end;
	};
	{
		Name = "Korea-NewYear";
		Check = function(Lang,Date)
			-- Korean locale only, 10–15 February.
			if Lang ~= "ko-kr" then
				return false
			elseif Date.month ~= 2 then
				return false
			elseif not CheckBetween(Date.day,10,15) then
				return false
			end
			return true
		end;
		RunEvent = function(ui,Modules)
			local ParticleHandle = Modules.ParticleHandle
			local MaterialUI = Modules.MaterialUI
			local AdvancedTween = Modules.AdvancedTween
			local Focused = false
			local Running = false
			ui.WindowFocused:Connect(function()
				Focused = true
				if Running then
					return
				end
				-- BUGFIX: same dead debounce as above — mark the firework
				-- burst as live so repeated focus events don't stack.
				Running = true
				-- Fire up to ten fireworks at random positions, stopping
				-- early if focus is lost.
				for i = 1,10 do
					if not Focused then
						break
					end
					UIFireworks(ui,math.random(10,90)/100,AdvancedTween,ParticleHandle)
					wait(0.5)
				end
				Running = false
			end)
			ui.WindowFocusReleased:Connect(function()
				Focused = false
			end)
		end;
	};
}
-- Resolves the user's locale and today's date, then runs the first
-- registered event whose Check predicate matches (at most one event
-- is ever started).
function module:Setup(ui,MaterialUI,AdvancedTween)
	local locale = game:GetService("LocalizationService").SystemLocaleId
	local today = os.date("*t", os.time())
	for _, event in ipairs(Events) do
		if event.Check(locale, today) then
			event.RunEvent(ui, {
				MaterialUI = MaterialUI;
				ParticleHandle = require(script.UIParticleEmitter);
				AdvancedTween = AdvancedTween;
			})
			break
		end
	end
end
| 23.607438 | 73 | 0.663224 | 3.265625 |
2fd6324d0f091b31d84d2f540ed44c97d23a000a
| 3,509 |
py
|
Python
|
common/bin/namespace_manager.py
|
frankovacevich/aleph
|
9b01dcabf3c074e8617e50fffd35c9ee1960eab6
|
[
"MIT"
] | null | null | null |
common/bin/namespace_manager.py
|
frankovacevich/aleph
|
9b01dcabf3c074e8617e50fffd35c9ee1960eab6
|
[
"MIT"
] | null | null | null |
common/bin/namespace_manager.py
|
frankovacevich/aleph
|
9b01dcabf3c074e8617e50fffd35c9ee1960eab6
|
[
"MIT"
] | null | null | null |
"""
Namespace Manager
-----------------
The namespace manager is the interface between data and the databases. Use the
namespace manager to save data to the database and perform simple queries.
The namespace manager can handle many DBMS. See the db_connections folder to
see the files that connect different types of databases.
Modify the namespace manager to use the database system you want. By default,
the namespace manager uses an SQLite connection.
"""
import traceback
import json
import datetime
import os
from dateutil.tz import tzutc, tzlocal
from dateutil import parser
from .logger import Log
from .root_folder import aleph_root_folder
from .db_connections import functions as fn
from .db_connections.sqlite import SqliteConnection
class NamespaceManager:
    """Facade between application data and the configured database backend.

    All persistence work is delegated to one connection object (an SQLite
    connection by default); records and date bounds are normalised through
    the shared helpers in ``db_connections.functions``.
    """

    # Bookkeeping columns owned by the storage layer; user code must not
    # rename, remove, or attach metadata to them.
    _RESERVED_FIELDS = ("t", "id", "id_", "t_")

    def __init__(self):
        self.conn = SqliteConnection(os.path.join(aleph_root_folder, "local", "backup", "msql.db"))
        self.log = Log("namespace_manager.log")

    # ==========================================================================
    # Connect and close
    # ==========================================================================
    def connect(self):
        """Open the underlying database connection."""
        self.conn.connect()

    def close(self):
        """Close the underlying database connection."""
        self.conn.close()

    # ==========================================================================
    # Operations (save, get, delete)
    # ==========================================================================
    def save_data(self, key, data):
        """Format ``data`` for storage and persist it under ``key``."""
        self.conn.save_data(key, fn.__format_data_for_saving__(data))

    def get_data(self, key, field="*", since=365, until=0, count=100000):
        """Query up to ``count`` records of ``key`` between two date bounds."""
        return self.conn.get_data(key, field, fn.__parse_date__(since), fn.__parse_date__(until, True), count)

    def get_data_by_id(self, key, id_):
        """Fetch a single record of ``key`` by its id."""
        return self.conn.get_data_by_id(key, id_)

    def delete_data(self, key, since, until):
        """Delete records of ``key`` between two date bounds."""
        return self.conn.delete_data(key, fn.__parse_date__(since), fn.__parse_date__(until, True))

    def delete_data_by_id(self, key, id_):
        """Delete a single record of ``key`` by its id."""
        return self.conn.delete_data_by_id(key, id_)

    # ==========================================================================
    # Get keys and fields. Get and set metadata
    # ==========================================================================
    def get_keys(self):
        """Return every stored key."""
        return self.conn.get_keys()

    def get_fields(self, key):
        """Return the fields recorded under ``key``."""
        return self.conn.get_fields(key)

    def set_metadata(self, key, field, alias, description=""):
        """Attach an alias (and optional description) to a user field."""
        self._reject_reserved(field)
        self.conn.set_metadata(key, field, str(alias), str(description))

    def get_metadata(self, key):
        """Return the metadata stored for ``key``."""
        return self.conn.get_metadata(key)

    # ==========================================================================
    # Remove and rename keys and fields
    # ==========================================================================
    def remove_key(self, key):
        """Drop ``key`` and everything stored under it."""
        self.conn.remove_key(key)

    def remove_field(self, key, field):
        """Drop a single user field from ``key``."""
        self._reject_reserved(field)
        self.conn.remove_field(key, field)

    def rename_key(self, key, new_key):
        """Rename ``key`` to ``new_key``."""
        self.conn.rename_key(key, new_key)

    def rename_field(self, key, field, new_field):
        """Rename a user field of ``key``."""
        self._reject_reserved(field)
        self.conn.rename_field(key, field, new_field)

    def _reject_reserved(self, field):
        """Raise if ``field`` is one of the internal bookkeeping columns."""
        if field in self._RESERVED_FIELDS:
            raise Exception("Invalid field")
| 34.742574 | 99 | 0.556854 | 3.296875 |
b73b91877c6c37a97da62a77dd2023b25fd36147
| 1,310 |
cpp
|
C++
|
week 4/Linked List/10. Minimum Platforms .cpp
|
arpit456jain/gfg-11-Weeks-Workshop-on-DSA-in-CPP
|
ed7fd8bc0a581f54ba3a3588dd01013776c4ece6
|
[
"MIT"
] | 6 |
2021-08-06T14:36:41.000Z
|
2022-03-22T11:22:07.000Z
|
week 4/Linked List/10. Minimum Platforms .cpp
|
arpit456jain/11-Weeks-Workshop-on-DSA-in-CPP
|
ed7fd8bc0a581f54ba3a3588dd01013776c4ece6
|
[
"MIT"
] | 1 |
2021-08-09T05:09:48.000Z
|
2021-08-09T05:09:48.000Z
|
week 4/Linked List/10. Minimum Platforms .cpp
|
arpit456jain/11-Weeks-Workshop-on-DSA-in-CPP
|
ed7fd8bc0a581f54ba3a3588dd01013776c4ece6
|
[
"MIT"
] | 1 |
2021-08-09T14:25:17.000Z
|
2021-08-09T14:25:17.000Z
|
// { Driver Code Starts
// Program to find minimum number of platforms
// required on a railway station
#include <bits/stdc++.h>
using namespace std;
// } Driver Code Ends
// Sweep-line solution: sort arrivals and departures independently, then
// walk both timelines keeping a running count of trains in the station.
class Solution{
    public:
    //Function to find the minimum number of platforms required at the
    //railway station such that no train waits.
    int findPlatform(int arr[], int dep[], int n)
    {
        sort(arr, arr + n);
        sort(dep, dep + n);
        int nextArrival = 1;   // first train is already on a platform
        int nextDeparture = 0; // earliest departure not yet processed
        int inStation = 1;     // trains currently occupying platforms
        int needed = 1;        // running maximum = final answer
        while (nextArrival < n && nextDeparture < n)
        {
            if (arr[nextArrival] <= dep[nextDeparture])
            {
                // A train arrives at or before the earliest departure:
                // one more platform is occupied.
                ++inStation;
                ++nextArrival;
            }
            else
            {
                // The earliest departure frees a platform.
                --inStation;
                ++nextDeparture;
            }
            needed = max(needed, inStation);
        }
        return needed;
    }
};
// { Driver Code Starts.

// Driver: reads t test cases; each case gives n, then n arrival times
// and n departure times, and prints the minimum number of platforms.
int main()
{
    int t;
    cin>>t;
    while(t--)
    {
        int n;
        cin>>n;
        // Use std::vector instead of variable-length arrays: VLAs
        // (`int arr[n]`) are a compiler extension, not standard C++.
        vector<int> arr(n);
        vector<int> dep(n);

        for(int i=0;i<n;i++)
            cin>>arr[i];

        for(int j=0;j<n;j++){
            cin>>dep[j];
        }

        Solution ob;
        cout <<ob.findPlatform(arr.data(), dep.data(), n)<<endl;
    }
    return 0;
}  // } Driver Code Ends
| 19.264706 | 70 | 0.468702 | 3.125 |
850e2997138ab171cf6ded1d539d8f29a392b26b
| 5,122 |
cs
|
C#
|
MerchantConsole/MenuHandler.cs
|
vorwaldb/InGameMerchant
|
35a4b7deb4d099e3d17314d25e692c6a383e2108
|
[
"CC0-1.0"
] | null | null | null |
MerchantConsole/MenuHandler.cs
|
vorwaldb/InGameMerchant
|
35a4b7deb4d099e3d17314d25e692c6a383e2108
|
[
"CC0-1.0"
] | null | null | null |
MerchantConsole/MenuHandler.cs
|
vorwaldb/InGameMerchant
|
35a4b7deb4d099e3d17314d25e692c6a383e2108
|
[
"CC0-1.0"
] | null | null | null |
using System;
using System.Collections.Generic;
using System.Text;
namespace MerchantConsole
{
    /// <summary>
    /// Class for handling interactions with the merchant console menu:
    /// prints menus, validates numeric input, and maps choices back to
    /// actions and items.
    /// </summary>
    public class MenuHandler
    {
        /// <summary>
        /// Gets the merchant action the user chooses to engage in.
        /// Shows the current-gold banner plus the main menu and re-prompts
        /// until a valid numeric choice is entered.
        /// </summary>
        /// <param name="currentGold">The player's current gold, shown in the banner.</param>
        /// <returns>The chosen <see cref="MerchantAction"/> (Buy, Sell or Exit).</returns>
        public MerchantAction GetMerchantAction(int currentGold)
        {
            // Menu numbers come straight from the enum values, so the printed
            // text always matches what the parsed input is cast back to.
            const int buyAction = (int)MerchantAction.Buy;
            const int sellAction = (int)MerchantAction.Sell;
            const int exitAction = (int)MerchantAction.Exit;

            var selectionText = $"{Environment.NewLine}{buyAction} - Buy{Environment.NewLine}{sellAction} - Sell{Environment.NewLine}{exitAction} - Exit";
            var menuText = $"Welcome to the Travelling Merchant! Have we got good deals for you!{Environment.NewLine}What would you like to do?";
            menuText += selectionText;

            // Frame the gold line with a dashed ruler sized to its text.
            var goldText = $"{Environment.NewLine}Current Gold: {currentGold}{Environment.NewLine}";
            var builder = new StringBuilder();
            var dashes = builder.Append('-', goldText.Trim().Length + 2).ToString();
            Console.WriteLine($"{dashes}{goldText}{dashes}");

            Console.WriteLine(menuText);
            Console.WriteLine();
            var enteredText = Console.ReadLine()?.Trim();
            // Re-prompt until the input parses to a number in [1, 3].
            while (!IsValidMenuSelection(enteredText, 1, 3))
            {
                Console.WriteLine("Please attempt your selection again.");
                Console.WriteLine(selectionText);
                enteredText = Console.ReadLine()?.Trim();
            }
            // ReSharper disable once AssignNullToNotNullAttribute
            var menuActionString = int.Parse(enteredText);
            return (MerchantAction) menuActionString;
        }

        /// <summary>
        /// For the passed in item list, returns the MenuResult containing the item the user wishes to sell
        /// (priced at its trade-in value).
        /// </summary>
        /// <param name="itemsToSell">Items the player may sell.</param>
        /// <returns>The chosen item, or an exit result when "Back" is picked.</returns>
        public MenuResult GetItemToSell(List<Item> itemsToSell)
        {
            return GetMenuReturnItemForAction(MerchantAction.Sell, itemsToSell);
        }

        /// <summary>
        /// For the passed in item list, returns the MenuResult containing the item the user wishes to buy
        /// (priced at its purchase price).
        /// </summary>
        /// <param name="itemsToBuy">Items the merchant offers for sale.</param>
        /// <returns>The chosen item, or an exit result when "Back" is picked.</returns>
        public MenuResult GetItemToBuy(List<Item> itemsToBuy)
        {
            return GetMenuReturnItemForAction(MerchantAction.Buy, itemsToBuy);
        }

        /// <summary>
        /// Shared buy/sell item menu: numbers the items, appends a "Back"
        /// option, validates the choice, and returns the selection.
        /// </summary>
        /// <param name="actionToPerform">Whether prices shown are buy prices or trade-in values.</param>
        /// <param name="menuItems">Items to list, in display order.</param>
        private MenuResult GetMenuReturnItemForAction(MerchantAction actionToPerform, List<Item> menuItems)
        {
            var maxItemNumber = menuItems.Count;
            // "Back" is always the last menu number, one past the items.
            var backMenu = maxItemNumber + 1;
            var textOption = actionToPerform == MerchantAction.Buy ? "buy" : "sell";
            var displayText = $"Please choose an item to {textOption}:";
            // Map each 1-based menu number to its item for lookup after input.
            var itemDictionary = new Dictionary<int, Item>();
            for (var num = 0; num < maxItemNumber; num++)
            {
                var item = menuItems[num];
                var itemNumber = num + 1;
                itemDictionary.Add(itemNumber, item);
                // Buying shows the full price; selling shows the trade-in value.
                var itemAmount = actionToPerform == MerchantAction.Buy ? item.Price : item.GetTradeInValue();
                displayText += $"{Environment.NewLine}{itemNumber} - {item.Name} - Price: {itemAmount}";
            }
            displayText += $"{Environment.NewLine}{backMenu} - Back{Environment.NewLine}";
            Console.WriteLine(displayText);
            var answer = Console.ReadLine();
            // Re-prompt until a number in [1, backMenu] is entered.
            while (!IsValidMenuSelection(answer, 1, backMenu))
            {
                Console.WriteLine("That is not a valid item choice. Please try again.");
                Console.WriteLine(displayText);
                answer = Console.ReadLine();
            }
            // ReSharper disable once AssignNullToNotNullAttribute
            var selectedAnswer = int.Parse(answer);
            if (selectedAnswer == backMenu)
                return new MenuResult{ChosenItem = null, IsExitingMenu = true};
            var pickedItem = itemDictionary[selectedAnswer];
            return new MenuResult {ChosenItem = pickedItem, IsExitingMenu = false};
        }

        /// <summary>
        /// Returns true when <paramref name="enteredText"/> parses to an
        /// integer in [minChoice, maxChoice]; otherwise prints an error
        /// message and returns false.
        /// </summary>
        private bool IsValidMenuSelection(string enteredText, int minChoice, int maxChoice)
        {
            var numberList = new List<int>();
            for(var num = minChoice; num <= maxChoice; num++)
            {
                numberList.Add(num);
            }

            if(int.TryParse(enteredText, out var number))
            {
                if (numberList.Contains(number))
                    return true;
                Console.WriteLine("Error: Invalid Menu Selection");
            }
            else
            {
                Console.WriteLine("Error: Invalid Input");
            }

            return false;
        }
    }
}
| 38.223881 | 154 | 0.572823 | 3.03125 |
0d794bda5a83f1bbe451045c898da3ec8c1f6860
| 968 |
cs
|
C#
|
MethodsExercise/05.AddAndSubtract/Program.cs
|
desata/csharp
|
1de2abdef8da24e872a3768dd4a62d13965cacd5
|
[
"MIT"
] | null | null | null |
MethodsExercise/05.AddAndSubtract/Program.cs
|
desata/csharp
|
1de2abdef8da24e872a3768dd4a62d13965cacd5
|
[
"MIT"
] | null | null | null |
MethodsExercise/05.AddAndSubtract/Program.cs
|
desata/csharp
|
1de2abdef8da24e872a3768dd4a62d13965cacd5
|
[
"MIT"
] | null | null | null |
using System;
namespace _05.AddAndSubtract
{
internal class Program
{
static void Main(string[] args)
{
//You will receive 3 integers.
//Create a method that returns the sum of the first two integers and another method that subtracts the third integer from the result of the sum method.
int integerOne = int.Parse(Console.ReadLine());
int integerTwo = int.Parse(Console.ReadLine());
int integerTree = int.Parse(Console.ReadLine());
int result = PrintResult(integerOne, integerTwo, integerTree);
Console.WriteLine(result);
}
static int PrintResult(int integerOne, int integerTwo, int integerTree)
{
int sum = integerOne + integerTwo;
return Substraction(sum, integerTree);
}
static int Substraction(int sum, int integerTree)
{
return sum - integerTree;
}
}
}
| 30.25 | 163 | 0.606405 | 3.125 |
15f31db8e038f20c2f53e6ca1fdddbb281e786b7
| 1,528 |
sql
|
SQL
|
13. Database Programmability - Lab/DBProgrammability.sql
|
VeselinBPavlov/database-basics-ms-sql
|
860a83370b1fb2a1a955a723524457f8d03ba1f3
|
[
"MIT"
] | 2 |
2019-04-14T21:04:28.000Z
|
2019-12-11T23:12:30.000Z
|
13. Database Programmability - Lab/DBProgrammability.sql
|
VeselinBPavlov/database-basics-ms-sql
|
860a83370b1fb2a1a955a723524457f8d03ba1f3
|
[
"MIT"
] | null | null | null |
13. Database Programmability - Lab/DBProgrammability.sql
|
VeselinBPavlov/database-basics-ms-sql
|
860a83370b1fb2a1a955a723524457f8d03ba1f3
|
[
"MIT"
] | null | null | null |
-- Queries for SoftUni Database
USE SoftUni
GO
-- 1. Count Employees by Town
-- Returns the number of employees whose address lies in the given town.
-- BUGFIX: a parameter declared as bare VARCHAR defaults to VARCHAR(1) in
-- T-SQL, silently truncating every town name to one character.
CREATE OR ALTER FUNCTION ufn_CountEmployeesByTown(@TownName VARCHAR(50))
RETURNS INT
BEGIN
	DECLARE @Count INT;

	SET @Count = (SELECT COUNT(e.EmployeeID)
	                FROM Employees AS e
	          INNER JOIN Addresses AS a
	                  ON a.AddressID = e.AddressID
	          INNER JOIN Towns AS t
	                  ON t.TownID = a.TownID
	               WHERE t.Name = @TownName)

	RETURN @Count
END
GO
-- 2. Employees Promotion
-- Gives every employee of the named department a 5% raise.
-- BUGFIX: bare VARCHAR parameter defaults to VARCHAR(1) in T-SQL, so the
-- department name was truncated and never matched.
CREATE OR ALTER PROCEDURE usp_RaiseSalaries(@DepartmentName VARCHAR(50)) AS
BEGIN
	UPDATE Employees
	   SET Salary *= 1.05
	 WHERE DepartmentID = (SELECT DepartmentID
	                         FROM Departments
	                        WHERE [Name] = @DepartmentName)
END
GO
-- 3. Employees Promotion By ID
-- Raises the salary of a single employee by 5%, but only when an
-- employee with the given id actually exists.
CREATE OR ALTER PROCEDURE usp_RaiseSalaryById(@Id INT) AS
BEGIN
	IF EXISTS (SELECT 1 FROM Employees WHERE EmployeeID = @Id)
	BEGIN
		UPDATE Employees
		   SET Salary *= 1.05
		 WHERE EmployeeID = @Id
	END
END
GO
-- 4. Triggered
-- Archive table: keeps a copy of every employee row removed from Employees.
CREATE TABLE DeletedEmployees (
    [EmployeeId] INT PRIMARY KEY,
    [FirstName] NVARCHAR(50),
    [LastName] NVARCHAR(50),
    [MiddleName] NVARCHAR(50),
    [JobTitle] NVARCHAR(50),
    [DepartmentId] INT,
    [Salary] DECIMAL(15, 2)
)

GO

-- After each DELETE on Employees, copy the removed rows (exposed via the
-- 'deleted' pseudo-table) into the archive.
CREATE TRIGGER t_DeletedEmployees ON Employees AFTER DELETE AS
INSERT INTO DeletedEmployees
            ([EmployeeId], [FirstName], [LastName], [MiddleName], [JobTitle], [DepartmentId], [Salary])
     SELECT [EmployeeId], [FirstName], [LastName], [MiddleName], [JobTitle], [DepartmentId], [Salary]
       FROM deleted

GO
| 22.80597 | 96 | 0.731021 | 3.296875 |
daa74de61db20e692bd7a910fa512803dd49ca43
| 7,996 |
php
|
PHP
|
app/Http/Schedules/NotificationScheduler.php
|
Shahrampeyvandi/charsoo_pannel
|
8f2ce9cc3aa71f32d72656b09eed699bfc03980c
|
[
"MIT"
] | null | null | null |
app/Http/Schedules/NotificationScheduler.php
|
Shahrampeyvandi/charsoo_pannel
|
8f2ce9cc3aa71f32d72656b09eed699bfc03980c
|
[
"MIT"
] | null | null | null |
app/Http/Schedules/NotificationScheduler.php
|
Shahrampeyvandi/charsoo_pannel
|
8f2ce9cc3aa71f32d72656b09eed699bfc03980c
|
[
"MIT"
] | null | null | null |
<?php
namespace App\Http\Schedules;
use App\Models\Notifications\Notifications;
use App\Models\Personals\Personal;
use App\Models\Cunsomers\Cunsomer;
use App\Models\Notifications\PannelNotifications;
use App\Models\User;
use App\Models\Services\Service;
use Spatie\Permission\Models\Role;
/**
 * Scheduled job that delivers pending notifications to customers,
 * service personnel or panel users via SMS (Kavenegar), Firebase push
 * and/or in-panel notification records.
 */
class NotificationScheduler
{
    /**
     * Scheduler entry point.
     *
     * Loads every notification that is still unsent and scheduled for the
     * current hour, marks it sent, resolves its recipient list (a
     * serialized array of ids where 0 means "everyone in the target
     * group"), and delivers over the channel stored in ->how.
     */
    public function __invoke()
    {
        echo date('Y-m-d H:00:00')."asdas".PHP_EOL;

        // Only notifications scheduled for the current hour and not yet sent.
        $notifications = Notifications::where('sent', 0)->where('send', date('Y-m-d H:00:00'))->get();
        if (count($notifications)) {
            foreach ($notifications as $notification) {
                if ($notification->sent == 0) {
                    $notification->sent = 1;
                    $notification->send = date('Y-m-d H:i:s');
                    // Recipient id list; 0 means "all members of the group".
                    $array = unserialize($notification->list);

                    if ($notification->to == 'مشتری ها') {
                        // --- Customers ---
                        foreach ($array as $key => $fard) {
                            if ($fard == 0) {
                                $members = Cunsomer::all();
                            } else {
                                $members[] = Cunsomer::find($fard);
                            }
                        }
                        foreach ($members as $member) {
                            if ($notification->how == 'پیامک') {
                                $this->sendsms($member->customer_mobile, $notification->text, $notification->smstemplate);
                            } else if ($notification->how == 'نوتیفیکیشن') {
                                // BUGFIX: arguments were previously joined with '/'
                                // (an arithmetic division) instead of ',', which
                                // dropped the title and passed a bogus first argument.
                                $this->sendnotification($member->firebase_token, $notification->title, $notification->text);
                            } else {
                                $this->sendsms($member->customer_mobile, $notification->text, $notification->smstemplate);
                                $this->sendnotification($member->firebase_token, $notification->title, $notification->text);
                            }
                        }
                    } else if ($notification->to == 'خدمت رسان ها') {
                        // --- Service personnel ---
                        foreach ($array as $key => $fard) {
                            if ($fard == 0) {
                                if ($notification->broker) {
                                    // FIXME(review): $role is undefined in this scope —
                                    // presumably this should iterate over roles (Role is
                                    // imported above); confirm the intended filter.
                                    foreach (Service::where('service_role', $role->name)->get() as $key => $service) {
                                        foreach ($service->personal as $key => $personal) {
                                            $personalslist[] = $personal;
                                        }
                                    }
                                    // De-duplicate personnel by id.
                                    $ids = [];
                                    foreach ($personalslist as $key => $personal) {
                                        $id = $personal->id;
                                        $repe = false;
                                        for ($x = 0; $x < count($ids); $x++) {
                                            if ($ids[$x] == $id) {
                                                $repe = true;
                                                break;
                                            }
                                        }
                                        // BUGFIX: the original test was inverted — it kept
                                        // only ids that were ALREADY seen (duplicates) and
                                        // dropped every first occurrence.
                                        if (!$repe) {
                                            $members[] = $personal;
                                        }
                                        $ids[] = $id;
                                    }
                                } else {
                                    $members = Personal::all();
                                }
                            } else {
                                $members[] = Personal::find($fard);
                            }
                        }
                        foreach ($members as $member) {
                            if ($notification->how == 'پیامک') {
                                $this->sendsms($member->personal_mobile, $notification->text, $notification->smstemplate);
                            } else if ($notification->how == 'نوتیفیکیشن') {
                                $this->sendnotification($member->firebase_token, $notification->title, $notification->text);
                            } else {
                                $this->sendsms($member->personal_mobile, $notification->text, $notification->smstemplate);
                                $this->sendnotification($member->firebase_token, $notification->title, $notification->text);
                            }
                        }
                    } else {
                        // --- Panel users ---
                        foreach ($array as $key => $fard) {
                            if ($fard == 0) {
                                $members = User::all();
                            } else {
                                $members[] = User::find($fard);
                            }
                        }
                        foreach ($members as $member) {
                            if ($notification->how == 'پیامک') {
                                $this->sendsms($member->user_mobile, $notification->text, $notification->smstemplate);
                            } else {
                                // Panel users get an in-panel notification record
                                // instead of a push notification.
                                echo date('Y-m-d H:00:00')."sendnotification".PHP_EOL;
                                $pannelnotification = new PannelNotifications;
                                $pannelnotification->title = $notification->title;
                                $pannelnotification->text = $notification->text;
                                $pannelnotification->users_id = $member->id;
                                $pannelnotification->notifications_id = $notification->id;
                                $pannelnotification->save();
                            }
                        }
                    }
                    // Persist the sent flag and actual send time.
                    $notification->update();
                }
            }
        }
    }

    /**
     * Sends a templated SMS through the Kavenegar verify-lookup API.
     *
     * @param string $phone    recipient mobile number
     * @param string $text     token substituted into the template
     * @param string $template Kavenegar template name
     */
    public function sendsms($phone, $text, $template)
    {
        echo date('Y-m-d H:00:00')."sms".PHP_EOL;
        // SECURITY(review): hard-coded API key — should be moved to
        // config/.env rather than committed to source.
        $apikey = '5079544B44782F41475237506D6A4C46713837717571386D6D784636486C666D';
        $receptor = $phone;
        $token = $text;
        $template = $template;
        $api = new \Kavenegar\KavenegarApi($apikey);
        try {
            $api->VerifyLookup($receptor, $token, null, null, $template);
        } catch (\Kavenegar\Exceptions\ApiException $e) {
            return response()->json(['code'=> $token ,'error' => 'مشکل پنل پیامکی پیش آمده است =>' . $e->errorMessage()
            ],500);
        } catch (\Kavenegar\Exceptions\HttpException $e) {
            return response()->json(['code'=> $token,'error' => 'مشکل اتصال پیش امده است =>' . $e->errorMessage()],500);
        }
        return response()->json(['code' => $token], 200);
    }

    /**
     * Sends a Firebase Cloud Messaging push notification (legacy HTTP API)
     * to a single device token.
     *
     * @param string $firebasetoken device registration token
     * @param string $title         notification title (also used as extra data)
     * @param string $text          notification body
     * @return bool always true; cURL errors are not inspected
     */
    public function sendnotification($firebasetoken, $title, $text)
    {
        echo date('Y-m-d H:00:00')."sendnotification".PHP_EOL;
        $fcmUrl = 'https://fcm.googleapis.com/fcm/send';
        $notification = [
            'title' => $text,
            'sound' => true,
        ];
        $extraNotificationData = ["message" => $title, "moredata" => $title];
        $fcmNotification = [
            'to' => $firebasetoken, //single token
            'notification' => $notification,
            'data' => $extraNotificationData
        ];
        $serverkey = env('FIREBASE_LEGACY_SERVER_KEY');
        $headers = [
            'Authorization: key=' . $serverkey,
            'Content-Type: application/json'
        ];
        $ch = curl_init();
        curl_setopt($ch, CURLOPT_URL, $fcmUrl);
        curl_setopt($ch, CURLOPT_POST, true);
        curl_setopt($ch, CURLOPT_HTTPHEADER, $headers);
        curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
        curl_setopt($ch, CURLOPT_SSL_VERIFYPEER, false);
        curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($fcmNotification));
        $result = curl_exec($ch);
        curl_close($ch);
        return true;
    }
}
| 27.572414 | 120 | 0.440845 | 3.015625 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.