project
stringclasses 788
values | commit_id
stringlengths 6
81
| CVE ID
stringlengths 13
16
| CWE ID
stringclasses 126
values | func
stringlengths 14
482k
| vul
int8 0
1
|
---|---|---|---|---|---|
tensorflow | a7c02f1a9bbc35473969618a09ee5f9f5d3e52d9 | NOT_APPLICABLE | NOT_APPLICABLE | void operator()(const CPUDevice& d, typename Functor::tout_type out,
typename Functor::tin_type in0,
typename Functor::tin_type in1, bool* error) {
Assign(d, out, in0.binaryExpr(in1, typename Functor::func()));
} | 0 |
lepton | 6a5ceefac1162783fffd9506a3de39c85c725761 | NOT_APPLICABLE | NOT_APPLICABLE | bool read_jpeg_and_copy_to_side_channel(std::vector<std::pair<uint32_t,
uint32_t>> *huff_input_offset,
ibytestreamcopier *jpg_str_in,
Sirikata::Array1d<uint8_t, 2> header,
bool is_embedded_jpeg) {
return read_jpeg(huff_input_offset, jpg_str_in, header, is_embedded_jpeg);
} | 0 |
php-src | 7245bff300d3fa8bacbef7897ff080a6f1c23eba | NOT_APPLICABLE | NOT_APPLICABLE | static HashTable* spl_filesystem_object_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */
{
spl_filesystem_object *intern = (spl_filesystem_object*)zend_object_store_get_object(obj TSRMLS_CC);
HashTable *rv;
zval *tmp, zrv;
char *pnstr, *path;
int pnlen, path_len;
char stmp[2];
*is_temp = 1;
if (!intern->std.properties) {
rebuild_object_properties(&intern->std);
}
ALLOC_HASHTABLE(rv);
ZEND_INIT_SYMTABLE_EX(rv, zend_hash_num_elements(intern->std.properties) + 3, 0);
INIT_PZVAL(&zrv);
Z_ARRVAL(zrv) = rv;
zend_hash_copy(rv, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *));
pnstr = spl_gen_private_prop_name(spl_ce_SplFileInfo, "pathName", sizeof("pathName")-1, &pnlen TSRMLS_CC);
path = spl_filesystem_object_get_pathname(intern, &path_len TSRMLS_CC);
add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, path, path_len, 1);
efree(pnstr);
if (intern->file_name) {
pnstr = spl_gen_private_prop_name(spl_ce_SplFileInfo, "fileName", sizeof("fileName")-1, &pnlen TSRMLS_CC);
spl_filesystem_object_get_path(intern, &path_len TSRMLS_CC);
if (path_len && path_len < intern->file_name_len) {
add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->file_name + path_len + 1, intern->file_name_len - (path_len + 1), 1);
} else {
add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->file_name, intern->file_name_len, 1);
}
efree(pnstr);
}
if (intern->type == SPL_FS_DIR) {
#ifdef HAVE_GLOB
pnstr = spl_gen_private_prop_name(spl_ce_DirectoryIterator, "glob", sizeof("glob")-1, &pnlen TSRMLS_CC);
if (php_stream_is(intern->u.dir.dirp ,&php_glob_stream_ops)) {
add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->_path, intern->_path_len, 1);
} else {
add_assoc_bool_ex(&zrv, pnstr, pnlen+1, 0);
}
efree(pnstr);
#endif
pnstr = spl_gen_private_prop_name(spl_ce_RecursiveDirectoryIterator, "subPathName", sizeof("subPathName")-1, &pnlen TSRMLS_CC);
if (intern->u.dir.sub_path) {
add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->u.dir.sub_path, intern->u.dir.sub_path_len, 1);
} else {
add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, "", 0, 1);
}
efree(pnstr);
}
if (intern->type == SPL_FS_FILE) {
pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "openMode", sizeof("openMode")-1, &pnlen TSRMLS_CC);
add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, intern->u.file.open_mode, intern->u.file.open_mode_len, 1);
efree(pnstr);
stmp[1] = '\0';
stmp[0] = intern->u.file.delimiter;
pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "delimiter", sizeof("delimiter")-1, &pnlen TSRMLS_CC);
add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, stmp, 1, 1);
efree(pnstr);
stmp[0] = intern->u.file.enclosure;
pnstr = spl_gen_private_prop_name(spl_ce_SplFileObject, "enclosure", sizeof("enclosure")-1, &pnlen TSRMLS_CC);
add_assoc_stringl_ex(&zrv, pnstr, pnlen+1, stmp, 1, 1);
efree(pnstr);
}
return rv;
} | 0 |
savannah | 4d4f992826a4962790ecd0cce6fbba4a415ce149 | NOT_APPLICABLE | NOT_APPLICABLE | _asn1_delete_list_and_nodes (void)
{
list_type *listElement;
while (firstElement)
{
listElement = firstElement;
firstElement = firstElement->next;
_asn1_remove_node (listElement->node, 0);
free (listElement);
}
}
| 0 |
Chrome | 6d2aef28cb0b677af468ebf3e32a176a7c37086e | NOT_APPLICABLE | NOT_APPLICABLE | void AudioOutputDevice::PauseOnIOThread(bool flush) {
DCHECK(message_loop()->BelongsToCurrentThread());
if (stream_id_ && is_started_) {
ipc_->PauseStream(stream_id_);
if (flush)
ipc_->FlushStream(stream_id_);
} else {
play_on_start_ = false;
}
}
| 0 |
libgadu | 77fdc9351bf5c1913c7fc518f8a0c0c87ab3860f | NOT_APPLICABLE | NOT_APPLICABLE | static gg_action_t gg_handle_connected(struct gg_session *sess, struct gg_event *e, enum gg_state_t next_state, enum gg_state_t alt_state, enum gg_state_t alt2_state)
{
#if 0
char buf[1024];
int res;
if (gg_send_queued_data(sess) == -1)
return GG_ACTION_FAIL;
res = gg_read(sess, buf, sizeof(buf));
if (res == -1 && (errno == EAGAIN || errno == EINTR)) {
gg_debug_session(sess, GG_DEBUG_MISC, "// gg_watch_fd() non-critical read error (errno=%d, %s)\n", errno, strerror(errno));
return GG_ACTION_WAIT;
}
if (res == -1 || res == 0) {
if (res == -1)
gg_debug_session(sess, GG_DEBUG_MISC, "// gg_watch_fd() read error (errno=%d, %s)\n", errno, strerror(errno));
else
gg_debug_session(sess, GG_DEBUG_MISC, "// gg_watch_fd() connection closed\n");
if (sess->state == GG_STATE_DISCONNECTING && res == 0) {
e->type = GG_EVENT_DISCONNECT_ACK;
} else if (sess->state == GG_STATE_READING_KEY) {
e->event.failure = GG_FAILURE_INVALID;
return GG_ACTION_FAIL;
}
return GG_ACTION_FAIL;
}
gg_debug_dump(sess, GG_DEBUG_DUMP, buf, res);
if (gg_session_handle_data(sess, buf, res, e) == -1)
return GG_ACTION_FAIL;
if (sess->send_buf != NULL)
sess->check |= GG_CHECK_WRITE;
return GG_ACTION_WAIT;
#else
struct gg_header *gh;
if (gg_send_queued_data(sess) == -1)
return GG_ACTION_FAIL;
gh = gg_recv_packet(sess);
if (gh == NULL) {
if (sess->state == GG_STATE_DISCONNECTING) {
gg_debug_session(sess, GG_DEBUG_MISC, "// gg_watch_fd() connection broken expectedly\n");
e->type = GG_EVENT_DISCONNECT_ACK;
return GG_ACTION_WAIT;
}
if (errno != EAGAIN) {
gg_debug_session(sess, GG_DEBUG_MISC, "// gg_watch_fd() gg_recv_packet failed (errno=%d, %s)\n", errno, strerror(errno));
return GG_ACTION_FAIL;
}
} else {
if (gg_session_handle_packet(sess, gh->type, (const char *) gh + sizeof(struct gg_header), gh->length, e) == -1) {
free(gh);
return GG_ACTION_FAIL;
}
free(gh);
}
sess->check = GG_CHECK_READ;
if (sess->send_buf != NULL)
sess->check |= GG_CHECK_WRITE;
return GG_ACTION_WAIT;
#endif
} | 0 |
nfdump | 3b006ededaf351f1723aea6c727c9edd1b1fff9b | NOT_APPLICABLE | NOT_APPLICABLE | static void Process_ipfix_templates(exporter_ipfix_domain_t *exporter, void *flowset_header, uint32_t size_left, FlowSource_t *fs) {
ipfix_template_record_t *ipfix_template_record;
void *DataPtr;
uint32_t count;
size_left -= 4; // subtract message header
DataPtr = flowset_header + 4;
ipfix_template_record = (ipfix_template_record_t *)DataPtr;
count = ntohs(ipfix_template_record->FieldCount);
if ( count == 0 ) {
Process_ipfix_template_withdraw(exporter, DataPtr, size_left, fs);
} else {
Process_ipfix_template_add(exporter, DataPtr, size_left, fs);
}
} // End of Process_ipfix_templates
| 0 |
vim | f50940531dd57135fe60aa393ac9d3281f352d88 | NOT_APPLICABLE | NOT_APPLICABLE | unref_extmatch(reg_extmatch_T *em)
{
int i;
if (em != NULL && --em->refcnt <= 0)
{
for (i = 0; i < NSUBEXP; ++i)
vim_free(em->matches[i]);
vim_free(em);
}
} | 0 |
linux | fb58fdcd295b914ece1d829b24df00a17a9624bc | NOT_APPLICABLE | NOT_APPLICABLE | static int intel_iommu_add_device(struct device *dev)
{
struct intel_iommu *iommu;
struct iommu_group *group;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return -ENODEV;
iommu_device_link(&iommu->iommu, dev);
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
iommu_group_put(group);
return 0;
} | 0 |
Chrome | ab5e55ff333def909d025ac45da9ffa0d88a63f2 | NOT_APPLICABLE | NOT_APPLICABLE | void RTCPeerConnection::stop()
{
m_iceState = IceStateClosed;
m_readyState = ReadyStateClosed;
if (m_peerHandler) {
m_peerHandler->stop();
m_peerHandler.clear();
}
}
| 0 |
openssl | 8108e0a6db133f3375608303fdd2083eb5115062 | CVE-2016-7798 | CWE-310 | Init_ossl_cipher(void)
{
#if 0
mOSSL = rb_define_module("OpenSSL");
eOSSLError = rb_define_class_under(mOSSL, "OpenSSLError", rb_eStandardError);
#endif
/* Document-class: OpenSSL::Cipher
*
* Provides symmetric algorithms for encryption and decryption. The
* algorithms that are available depend on the particular version
* of OpenSSL that is installed.
*
* === Listing all supported algorithms
*
* A list of supported algorithms can be obtained by
*
* puts OpenSSL::Cipher.ciphers
*
* === Instantiating a Cipher
*
* There are several ways to create a Cipher instance. Generally, a
* Cipher algorithm is categorized by its name, the key length in bits
* and the cipher mode to be used. The most generic way to create a
* Cipher is the following
*
* cipher = OpenSSL::Cipher.new('<name>-<key length>-<mode>')
*
* That is, a string consisting of the hyphenated concatenation of the
* individual components name, key length and mode. Either all uppercase
* or all lowercase strings may be used, for example:
*
* cipher = OpenSSL::Cipher.new('AES-128-CBC')
*
* For each algorithm supported, there is a class defined under the
* Cipher class that goes by the name of the cipher, e.g. to obtain an
* instance of AES, you could also use
*
* # these are equivalent
* cipher = OpenSSL::Cipher::AES.new(128, :CBC)
* cipher = OpenSSL::Cipher::AES.new(128, 'CBC')
* cipher = OpenSSL::Cipher::AES.new('128-CBC')
*
* Finally, due to its wide-spread use, there are also extra classes
* defined for the different key sizes of AES
*
* cipher = OpenSSL::Cipher::AES128.new(:CBC)
* cipher = OpenSSL::Cipher::AES192.new(:CBC)
* cipher = OpenSSL::Cipher::AES256.new(:CBC)
*
* === Choosing either encryption or decryption mode
*
* Encryption and decryption are often very similar operations for
* symmetric algorithms, this is reflected by not having to choose
* different classes for either operation, both can be done using the
* same class. Still, after obtaining a Cipher instance, we need to
* tell the instance what it is that we intend to do with it, so we
* need to call either
*
* cipher.encrypt
*
* or
*
* cipher.decrypt
*
* on the Cipher instance. This should be the first call after creating
* the instance, otherwise configuration that has already been set could
* get lost in the process.
*
* === Choosing a key
*
* Symmetric encryption requires a key that is the same for the encrypting
* and for the decrypting party and after initial key establishment should
* be kept as private information. There are a lot of ways to create
* insecure keys, the most notable is to simply take a password as the key
* without processing the password further. A simple and secure way to
* create a key for a particular Cipher is
*
* cipher = OpenSSL::AES256.new(:CFB)
* cipher.encrypt
* key = cipher.random_key # also sets the generated key on the Cipher
*
* If you absolutely need to use passwords as encryption keys, you
* should use Password-Based Key Derivation Function 2 (PBKDF2) by
* generating the key with the help of the functionality provided by
* OpenSSL::PKCS5.pbkdf2_hmac_sha1 or OpenSSL::PKCS5.pbkdf2_hmac.
*
* Although there is Cipher#pkcs5_keyivgen, its use is deprecated and
* it should only be used in legacy applications because it does not use
* the newer PKCS#5 v2 algorithms.
*
* === Choosing an IV
*
* The cipher modes CBC, CFB, OFB and CTR all need an "initialization
* vector", or short, IV. ECB mode is the only mode that does not require
* an IV, but there is almost no legitimate use case for this mode
* because of the fact that it does not sufficiently hide plaintext
* patterns. Therefore
*
* <b>You should never use ECB mode unless you are absolutely sure that
* you absolutely need it</b>
*
* Because of this, you will end up with a mode that explicitly requires
* an IV in any case. Note that for backwards compatibility reasons,
* setting an IV is not explicitly mandated by the Cipher API. If not
* set, OpenSSL itself defaults to an all-zeroes IV ("\\0", not the
* character). Although the IV can be seen as public information, i.e.
* it may be transmitted in public once generated, it should still stay
* unpredictable to prevent certain kinds of attacks. Therefore, ideally
*
* <b>Always create a secure random IV for every encryption of your
* Cipher</b>
*
* A new, random IV should be created for every encryption of data. Think
* of the IV as a nonce (number used once) - it's public but random and
* unpredictable. A secure random IV can be created as follows
*
* cipher = ...
* cipher.encrypt
* key = cipher.random_key
* iv = cipher.random_iv # also sets the generated IV on the Cipher
*
* Although the key is generally a random value, too, it is a bad choice
* as an IV. There are elaborate ways how an attacker can take advantage
* of such an IV. As a general rule of thumb, exposing the key directly
* or indirectly should be avoided at all cost and exceptions only be
* made with good reason.
*
* === Calling Cipher#final
*
* ECB (which should not be used) and CBC are both block-based modes.
* This means that unlike for the other streaming-based modes, they
* operate on fixed-size blocks of data, and therefore they require a
* "finalization" step to produce or correctly decrypt the last block of
* data by appropriately handling some form of padding. Therefore it is
* essential to add the output of OpenSSL::Cipher#final to your
* encryption/decryption buffer or you will end up with decryption errors
* or truncated data.
*
* Although this is not really necessary for streaming-mode ciphers, it is
* still recommended to apply the same pattern of adding the output of
* Cipher#final there as well - it also enables you to switch between
* modes more easily in the future.
*
* === Encrypting and decrypting some data
*
* data = "Very, very confidential data"
*
* cipher = OpenSSL::Cipher::AES.new(128, :CBC)
* cipher.encrypt
* key = cipher.random_key
* iv = cipher.random_iv
*
* encrypted = cipher.update(data) + cipher.final
* ...
* decipher = OpenSSL::Cipher::AES.new(128, :CBC)
* decipher.decrypt
* decipher.key = key
* decipher.iv = iv
*
* plain = decipher.update(encrypted) + decipher.final
*
* puts data == plain #=> true
*
* === Authenticated Encryption and Associated Data (AEAD)
*
* If the OpenSSL version used supports it, an Authenticated Encryption
* mode (such as GCM or CCM) should always be preferred over any
* unauthenticated mode. Currently, OpenSSL supports AE only in combination
* with Associated Data (AEAD) where additional associated data is included
* in the encryption process to compute a tag at the end of the encryption.
* This tag will also be used in the decryption process and by verifying
* its validity, the authenticity of a given ciphertext is established.
*
* This is superior to unauthenticated modes in that it allows to detect
* if somebody effectively changed the ciphertext after it had been
* encrypted. This prevents malicious modifications of the ciphertext that
* could otherwise be exploited to modify ciphertexts in ways beneficial to
* potential attackers.
*
* An associated data is used where there is additional information, such as
* headers or some metadata, that must be also authenticated but not
* necessarily need to be encrypted. If no associated data is needed for
* encryption and later decryption, the OpenSSL library still requires a
* value to be set - "" may be used in case none is available.
*
* An example using the GCM (Galois/Counter Mode). You have 16 bytes +key+,
* 12 bytes (96 bits) +nonce+ and the associated data +auth_data+. Be sure
* not to reuse the +key+ and +nonce+ pair. Reusing an nonce ruins the
* security gurantees of GCM mode.
*
* cipher = OpenSSL::Cipher::AES.new(128, :GCM).encrypt
* cipher.key = key
* cipher.iv = nonce
* cipher.auth_data = auth_data
*
* encrypted = cipher.update(data) + cipher.final
* tag = cipher.auth_tag # produces 16 bytes tag by default
*
* Now you are the receiver. You know the +key+ and have received +nonce+,
* +auth_data+, +encrypted+ and +tag+ through an untrusted network. Note
* that GCM accepts an arbitrary length tag between 1 and 16 bytes. You may
* additionally need to check that the received tag has the correct length,
* or you allow attackers to forge a valid single byte tag for the tampered
* ciphertext with a probability of 1/256.
*
* raise "tag is truncated!" unless tag.bytesize == 16
* decipher = OpenSSL::Cipher::AES.new(128, :GCM).decrypt
* decipher.key = key
* decipher.iv = nonce
* decipher.auth_tag = tag
* decipher.auth_data = auth_data
*
* decrypted = decipher.update(encrypted) + decipher.final
*
* puts data == decrypted #=> true
*/
cCipher = rb_define_class_under(mOSSL, "Cipher", rb_cObject);
eCipherError = rb_define_class_under(cCipher, "CipherError", eOSSLError);
rb_define_alloc_func(cCipher, ossl_cipher_alloc);
rb_define_copy_func(cCipher, ossl_cipher_copy);
rb_define_module_function(cCipher, "ciphers", ossl_s_ciphers, 0);
rb_define_method(cCipher, "initialize", ossl_cipher_initialize, 1);
rb_define_method(cCipher, "reset", ossl_cipher_reset, 0);
rb_define_method(cCipher, "encrypt", ossl_cipher_encrypt, -1);
rb_define_method(cCipher, "decrypt", ossl_cipher_decrypt, -1);
rb_define_method(cCipher, "pkcs5_keyivgen", ossl_cipher_pkcs5_keyivgen, -1);
rb_define_method(cCipher, "update", ossl_cipher_update, -1);
rb_define_method(cCipher, "final", ossl_cipher_final, 0);
rb_define_method(cCipher, "name", ossl_cipher_name, 0);
rb_define_method(cCipher, "key=", ossl_cipher_set_key, 1);
rb_define_method(cCipher, "auth_data=", ossl_cipher_set_auth_data, 1);
rb_define_method(cCipher, "auth_tag=", ossl_cipher_set_auth_tag, 1);
rb_define_method(cCipher, "auth_tag", ossl_cipher_get_auth_tag, -1);
rb_define_method(cCipher, "auth_tag_len=", ossl_cipher_set_auth_tag_len, 1);
rb_define_method(cCipher, "authenticated?", ossl_cipher_is_authenticated, 0);
rb_define_method(cCipher, "key_len=", ossl_cipher_set_key_length, 1);
rb_define_method(cCipher, "key_len", ossl_cipher_key_length, 0);
rb_define_method(cCipher, "iv=", ossl_cipher_set_iv, 1);
rb_define_method(cCipher, "iv_len=", ossl_cipher_set_iv_length, 1);
rb_define_method(cCipher, "iv_len", ossl_cipher_iv_length, 0);
rb_define_method(cCipher, "block_size", ossl_cipher_block_size, 0);
rb_define_method(cCipher, "padding=", ossl_cipher_set_padding, 1);
id_auth_tag_len = rb_intern_const("auth_tag_len");
}
| 1 |
tcpdump | 88b2dac837e81cf56dce05e6e7b5989332c0092d | NOT_APPLICABLE | NOT_APPLICABLE | ospf6_print_lsa(netdissect_options *ndo,
register const struct lsa6 *lsap, const u_char *dataend)
{
register const struct rlalink6 *rlp;
#if 0
register const struct tos_metric *tosp;
#endif
register const rtrid_t *ap;
#if 0
register const struct aslametric *almp;
register const struct mcla *mcp;
#endif
register const struct llsa *llsap;
register const struct lsa6_prefix *lsapp;
#if 0
register const uint32_t *lp;
#endif
register u_int prefixes;
register int bytelen;
register u_int length, lsa_length;
uint32_t flags32;
const uint8_t *tptr;
if (ospf6_print_lshdr(ndo, &lsap->ls_hdr, dataend))
return (1);
ND_TCHECK(lsap->ls_hdr.ls_length);
length = EXTRACT_16BITS(&lsap->ls_hdr.ls_length);
/*
* The LSA length includes the length of the header;
* it must have a value that's at least that length.
* If it does, find the length of what follows the
* header.
*/
if (length < sizeof(struct lsa6_hdr) || (const u_char *)lsap + length > dataend)
return (1);
lsa_length = length - sizeof(struct lsa6_hdr);
tptr = (const uint8_t *)lsap+sizeof(struct lsa6_hdr);
switch (EXTRACT_16BITS(&lsap->ls_hdr.ls_type)) {
case LS_TYPE_ROUTER | LS_SCOPE_AREA:
if (lsa_length < sizeof (lsap->lsa_un.un_rla.rla_options))
return (1);
lsa_length -= sizeof (lsap->lsa_un.un_rla.rla_options);
ND_TCHECK(lsap->lsa_un.un_rla.rla_options);
ND_PRINT((ndo, "\n\t Options [%s]",
bittok2str(ospf6_option_values, "none",
EXTRACT_32BITS(&lsap->lsa_un.un_rla.rla_options))));
ND_PRINT((ndo, ", RLA-Flags [%s]",
bittok2str(ospf6_rla_flag_values, "none",
lsap->lsa_un.un_rla.rla_flags)));
rlp = lsap->lsa_un.un_rla.rla_link;
while (lsa_length != 0) {
if (lsa_length < sizeof (*rlp))
return (1);
lsa_length -= sizeof (*rlp);
ND_TCHECK(*rlp);
switch (rlp->link_type) {
case RLA_TYPE_VIRTUAL:
ND_PRINT((ndo, "\n\t Virtual Link: Neighbor Router-ID %s"
"\n\t Neighbor Interface-ID %s, Interface %s",
ipaddr_string(ndo, &rlp->link_nrtid),
ipaddr_string(ndo, &rlp->link_nifid),
ipaddr_string(ndo, &rlp->link_ifid)));
break;
case RLA_TYPE_ROUTER:
ND_PRINT((ndo, "\n\t Neighbor Router-ID %s"
"\n\t Neighbor Interface-ID %s, Interface %s",
ipaddr_string(ndo, &rlp->link_nrtid),
ipaddr_string(ndo, &rlp->link_nifid),
ipaddr_string(ndo, &rlp->link_ifid)));
break;
case RLA_TYPE_TRANSIT:
ND_PRINT((ndo, "\n\t Neighbor Network-ID %s"
"\n\t Neighbor Interface-ID %s, Interface %s",
ipaddr_string(ndo, &rlp->link_nrtid),
ipaddr_string(ndo, &rlp->link_nifid),
ipaddr_string(ndo, &rlp->link_ifid)));
break;
default:
ND_PRINT((ndo, "\n\t Unknown Router Links Type 0x%02x",
rlp->link_type));
return (0);
}
ND_PRINT((ndo, ", metric %d", EXTRACT_16BITS(&rlp->link_metric)));
rlp++;
}
break;
case LS_TYPE_NETWORK | LS_SCOPE_AREA:
if (lsa_length < sizeof (lsap->lsa_un.un_nla.nla_options))
return (1);
lsa_length -= sizeof (lsap->lsa_un.un_nla.nla_options);
ND_TCHECK(lsap->lsa_un.un_nla.nla_options);
ND_PRINT((ndo, "\n\t Options [%s]",
bittok2str(ospf6_option_values, "none",
EXTRACT_32BITS(&lsap->lsa_un.un_nla.nla_options))));
ND_PRINT((ndo, "\n\t Connected Routers:"));
ap = lsap->lsa_un.un_nla.nla_router;
while (lsa_length != 0) {
if (lsa_length < sizeof (*ap))
return (1);
lsa_length -= sizeof (*ap);
ND_TCHECK(*ap);
ND_PRINT((ndo, "\n\t\t%s", ipaddr_string(ndo, ap)));
++ap;
}
break;
case LS_TYPE_INTER_AP | LS_SCOPE_AREA:
if (lsa_length < sizeof (lsap->lsa_un.un_inter_ap.inter_ap_metric))
return (1);
lsa_length -= sizeof (lsap->lsa_un.un_inter_ap.inter_ap_metric);
ND_TCHECK(lsap->lsa_un.un_inter_ap.inter_ap_metric);
ND_PRINT((ndo, ", metric %u",
EXTRACT_32BITS(&lsap->lsa_un.un_inter_ap.inter_ap_metric) & SLA_MASK_METRIC));
tptr = (const uint8_t *)lsap->lsa_un.un_inter_ap.inter_ap_prefix;
while (lsa_length != 0) {
bytelen = ospf6_print_lsaprefix(ndo, tptr, lsa_length);
if (bytelen < 0)
goto trunc;
lsa_length -= bytelen;
tptr += bytelen;
}
break;
case LS_TYPE_ASE | LS_SCOPE_AS:
if (lsa_length < sizeof (lsap->lsa_un.un_asla.asla_metric))
return (1);
lsa_length -= sizeof (lsap->lsa_un.un_asla.asla_metric);
ND_TCHECK(lsap->lsa_un.un_asla.asla_metric);
flags32 = EXTRACT_32BITS(&lsap->lsa_un.un_asla.asla_metric);
ND_PRINT((ndo, "\n\t Flags [%s]",
bittok2str(ospf6_asla_flag_values, "none", flags32)));
ND_PRINT((ndo, " metric %u",
EXTRACT_32BITS(&lsap->lsa_un.un_asla.asla_metric) &
ASLA_MASK_METRIC));
tptr = (const uint8_t *)lsap->lsa_un.un_asla.asla_prefix;
lsapp = (const struct lsa6_prefix *)tptr;
bytelen = ospf6_print_lsaprefix(ndo, tptr, lsa_length);
if (bytelen < 0)
goto trunc;
lsa_length -= bytelen;
tptr += bytelen;
if ((flags32 & ASLA_FLAG_FWDADDR) != 0) {
const struct in6_addr *fwdaddr6;
fwdaddr6 = (const struct in6_addr *)tptr;
if (lsa_length < sizeof (*fwdaddr6))
return (1);
lsa_length -= sizeof (*fwdaddr6);
ND_TCHECK(*fwdaddr6);
ND_PRINT((ndo, " forward %s",
ip6addr_string(ndo, fwdaddr6)));
tptr += sizeof(*fwdaddr6);
}
if ((flags32 & ASLA_FLAG_ROUTETAG) != 0) {
if (lsa_length < sizeof (uint32_t))
return (1);
lsa_length -= sizeof (uint32_t);
ND_TCHECK(*(const uint32_t *)tptr);
ND_PRINT((ndo, " tag %s",
ipaddr_string(ndo, (const uint32_t *)tptr)));
tptr += sizeof(uint32_t);
}
if (lsapp->lsa_p_metric) {
if (lsa_length < sizeof (uint32_t))
return (1);
lsa_length -= sizeof (uint32_t);
ND_TCHECK(*(const uint32_t *)tptr);
ND_PRINT((ndo, " RefLSID: %s",
ipaddr_string(ndo, (const uint32_t *)tptr)));
tptr += sizeof(uint32_t);
}
break;
case LS_TYPE_LINK:
/* Link LSA */
llsap = &lsap->lsa_un.un_llsa;
if (lsa_length < sizeof (llsap->llsa_priandopt))
return (1);
lsa_length -= sizeof (llsap->llsa_priandopt);
ND_TCHECK(llsap->llsa_priandopt);
ND_PRINT((ndo, "\n\t Options [%s]",
bittok2str(ospf6_option_values, "none",
EXTRACT_32BITS(&llsap->llsa_options))));
if (lsa_length < sizeof (llsap->llsa_lladdr) + sizeof (llsap->llsa_nprefix))
return (1);
lsa_length -= sizeof (llsap->llsa_lladdr) + sizeof (llsap->llsa_nprefix);
prefixes = EXTRACT_32BITS(&llsap->llsa_nprefix);
ND_PRINT((ndo, "\n\t Priority %d, Link-local address %s, Prefixes %d:",
llsap->llsa_priority,
ip6addr_string(ndo, &llsap->llsa_lladdr),
prefixes));
tptr = (const uint8_t *)llsap->llsa_prefix;
while (prefixes > 0) {
bytelen = ospf6_print_lsaprefix(ndo, tptr, lsa_length);
if (bytelen < 0)
goto trunc;
prefixes--;
lsa_length -= bytelen;
tptr += bytelen;
}
break;
case LS_TYPE_INTRA_AP | LS_SCOPE_AREA:
/* Intra-Area-Prefix LSA */
if (lsa_length < sizeof (lsap->lsa_un.un_intra_ap.intra_ap_rtid))
return (1);
lsa_length -= sizeof (lsap->lsa_un.un_intra_ap.intra_ap_rtid);
ND_TCHECK(lsap->lsa_un.un_intra_ap.intra_ap_rtid);
ospf6_print_ls_type(ndo,
EXTRACT_16BITS(&lsap->lsa_un.un_intra_ap.intra_ap_lstype),
&lsap->lsa_un.un_intra_ap.intra_ap_lsid);
if (lsa_length < sizeof (lsap->lsa_un.un_intra_ap.intra_ap_nprefix))
return (1);
lsa_length -= sizeof (lsap->lsa_un.un_intra_ap.intra_ap_nprefix);
ND_TCHECK(lsap->lsa_un.un_intra_ap.intra_ap_nprefix);
prefixes = EXTRACT_16BITS(&lsap->lsa_un.un_intra_ap.intra_ap_nprefix);
ND_PRINT((ndo, "\n\t Prefixes %d:", prefixes));
tptr = (const uint8_t *)lsap->lsa_un.un_intra_ap.intra_ap_prefix;
while (prefixes > 0) {
bytelen = ospf6_print_lsaprefix(ndo, tptr, lsa_length);
if (bytelen < 0)
goto trunc;
prefixes--;
lsa_length -= bytelen;
tptr += bytelen;
}
break;
case LS_TYPE_GRACE | LS_SCOPE_LINKLOCAL:
if (ospf_print_grace_lsa(ndo, tptr, lsa_length) == -1) {
return 1;
}
break;
case LS_TYPE_INTRA_ATE | LS_SCOPE_LINKLOCAL:
if (ospf_print_te_lsa(ndo, tptr, lsa_length) == -1) {
return 1;
}
break;
default:
if(!print_unknown_data(ndo,tptr,
"\n\t ",
lsa_length)) {
return (1);
}
break;
}
return (0);
trunc:
return (1);
}
| 0 |
rufus | c3c39f7f8a11f612c4ebf7affce25ec6928eb1cb | NOT_APPLICABLE | NOT_APPLICABLE | BOOL CreateTaskbarList(void)
{
HRESULT hr;
if (nWindowsVersion < WINDOWS_7)
return FALSE;
hr = CoCreateInstance(&my_CLSID_TaskbarList, NULL, CLSCTX_ALL, &my_IID_ITaskbarList3, (LPVOID)&ptbl);
if (FAILED(hr)) {
uprintf("CoCreateInstance for TaskbarList failed: error %X\n", hr);
ptbl = NULL;
return FALSE;
}
return TRUE;
}
| 0 |
savannah | 4d729e322fae359a1aefaafec1144764a54e8ad4 | NOT_APPLICABLE | NOT_APPLICABLE | scheme_default_port (enum url_scheme scheme)
{
return supported_schemes[scheme].default_port;
}
| 0 |
linux | 550fd08c2cebad61c548def135f67aba284c6162 | NOT_APPLICABLE | NOT_APPLICABLE | static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
struct slave *slave;
struct bonding *bond;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return RX_HANDLER_CONSUMED;
*pskb = skb;
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
if (bond->params.arp_interval)
slave->dev->last_rx = jiffies;
if (bond->recv_probe) {
struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
if (likely(nskb)) {
bond->recv_probe(nskb, bond, slave);
dev_kfree_skb(nskb);
}
}
if (bond_should_deliver_exact_match(skb, slave, bond)) {
return RX_HANDLER_EXACT;
}
skb->dev = bond->dev;
if (bond->params.mode == BOND_MODE_ALB &&
bond->dev->priv_flags & IFF_BRIDGE_PORT &&
skb->pkt_type == PACKET_HOST) {
if (unlikely(skb_cow_head(skb,
skb->data - skb_mac_header(skb)))) {
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
}
return RX_HANDLER_ANOTHER;
}
| 0 |
Chrome | 29734f46c6dc9362783091180c2ee279ad53637f | NOT_APPLICABLE | NOT_APPLICABLE | bool V4L2JpegEncodeAccelerator::EncodedInstance::SetInputBufferFormat(
gfx::Size coded_size) {
DCHECK(parent_->encoder_task_runner_->BelongsToCurrentThread());
DCHECK(!input_streamon_);
DCHECK(input_job_queue_.empty());
constexpr uint32_t input_pix_fmt_candidates[] = {
V4L2_PIX_FMT_YUV420M,
V4L2_PIX_FMT_YUV420,
};
struct v4l2_format format;
input_buffer_pixelformat_ = 0;
for (const auto input_pix_fmt : input_pix_fmt_candidates) {
DCHECK_EQ(V4L2Device::V4L2PixFmtToVideoPixelFormat(input_pix_fmt),
PIXEL_FORMAT_I420);
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
format.fmt.pix_mp.num_planes = kMaxI420Plane;
format.fmt.pix_mp.pixelformat = input_pix_fmt;
format.fmt.pix_mp.field = V4L2_FIELD_ANY;
format.fmt.pix_mp.width = coded_size.width();
format.fmt.pix_mp.height = coded_size.height();
if (device_->Ioctl(VIDIOC_S_FMT, &format) == 0 &&
format.fmt.pix_mp.pixelformat == input_pix_fmt) {
input_buffer_pixelformat_ = format.fmt.pix_mp.pixelformat;
input_buffer_num_planes_ = format.fmt.pix_mp.num_planes;
input_buffer_height_ = format.fmt.pix_mp.height;
break;
}
}
if (input_buffer_pixelformat_ == 0) {
VLOGF(1) << "Neither YUV420 nor YUV420M is supported.";
return false;
}
if (format.fmt.pix_mp.width != static_cast<uint32_t>(coded_size.width()) ||
format.fmt.pix_mp.height != static_cast<uint32_t>(coded_size.height())) {
VLOGF(1) << "Width " << coded_size.width() << "->"
<< format.fmt.pix_mp.width << ",Height " << coded_size.height()
<< "->" << format.fmt.pix_mp.height;
return false;
}
for (int i = 0; i < format.fmt.pix_mp.num_planes; i++) {
bytes_per_line_[i] = format.fmt.pix_mp.plane_fmt[i].bytesperline;
VLOGF(3) << "Bytes Per Line:" << bytes_per_line_[i];
}
return true;
}
| 0 |
openssl | aab7c770353b1dc4ba045938c8fb446dd1c4531e | NOT_APPLICABLE | NOT_APPLICABLE | static int ec_mul_consttime(const EC_GROUP *group, EC_POINT *r, const BIGNUM *scalar,
const EC_POINT *point, BN_CTX *ctx)
{
int i, order_bits, group_top, kbit, pbit, Z_is_one, ret;
ret = 0;
EC_POINT *s = NULL;
BIGNUM *k = NULL;
BIGNUM *lambda = NULL;
BN_CTX *new_ctx = NULL;
if (ctx == NULL)
if ((ctx = new_ctx = BN_CTX_secure_new()) == NULL)
return 0;
if ((group->order == NULL) || (group->field == NULL))
goto err;
order_bits = BN_num_bits(group->order);
s = EC_POINT_new(group);
if (s == NULL)
goto err;
if (point == NULL) {
if (group->generator == NULL)
goto err;
if (!EC_POINT_copy(s, group->generator))
goto err;
} else {
if (!EC_POINT_copy(s, point))
goto err;
}
EC_POINT_set_flags(s, BN_FLG_CONSTTIME);
BN_CTX_start(ctx);
lambda = BN_CTX_get(ctx);
k = BN_CTX_get(ctx);
if (k == NULL)
goto err;
/*
* Group orders are often on a word boundary.
* So when we pad the scalar, some timing diff might
* pop if it needs to be expanded due to carries.
* So expand ahead of time.
*/
group_top = bn_get_top(group->order);
if ((bn_wexpand(k, group_top + 1) == NULL)
|| (bn_wexpand(lambda, group_top + 1) == NULL))
goto err;
if (!BN_copy(k, scalar))
goto err;
BN_set_flags(k, BN_FLG_CONSTTIME);
if ((BN_num_bits(k) > order_bits) || (BN_is_negative(k))) {
/*
* this is an unusual input, and we don't guarantee
* constant-timeness
*/
if(!BN_nnmod(k, k, group->order, ctx))
goto err;
}
if (!BN_add(lambda, k, group->order))
goto err;
BN_set_flags(lambda, BN_FLG_CONSTTIME);
if (!BN_add(k, lambda, group->order))
goto err;
/*
* lambda := scalar + order
* k := scalar + 2*order
*/
kbit = BN_is_bit_set(lambda, order_bits);
BN_consttime_swap(kbit, k, lambda, group_top + 1);
group_top = bn_get_top(group->field);
if ((bn_wexpand(s->X, group_top) == NULL)
|| (bn_wexpand(s->Y, group_top) == NULL)
|| (bn_wexpand(s->Z, group_top) == NULL)
|| (bn_wexpand(r->X, group_top) == NULL)
|| (bn_wexpand(r->Y, group_top) == NULL)
|| (bn_wexpand(r->Z, group_top) == NULL))
goto err;
/* top bit is a 1, in a fixed pos */
if (!EC_POINT_copy(r, s))
goto err;
EC_POINT_set_flags(r, BN_FLG_CONSTTIME);
if (!EC_POINT_dbl(group, s, s, ctx))
goto err;
pbit = 0;
#define EC_POINT_CSWAP(c, a, b, w, t) do { \
BN_consttime_swap(c, (a)->X, (b)->X, w); \
BN_consttime_swap(c, (a)->Y, (b)->Y, w); \
BN_consttime_swap(c, (a)->Z, (b)->Z, w); \
t = ((a)->Z_is_one ^ (b)->Z_is_one) & (c); \
(a)->Z_is_one ^= (t); \
(b)->Z_is_one ^= (t); \
} while(0)
for (i = order_bits - 1; i >= 0; i--) {
kbit = BN_is_bit_set(k, i) ^ pbit;
EC_POINT_CSWAP(kbit, r, s, group_top, Z_is_one);
if (!EC_POINT_add(group, s, r, s, ctx))
goto err;
if (!EC_POINT_dbl(group, r, r, ctx))
goto err;
/*
* pbit logic merges this cswap with that of the
* next iteration
*/
pbit ^= kbit;
}
/* one final cswap to move the right value into r */
EC_POINT_CSWAP(pbit, r, s, group_top, Z_is_one);
#undef EC_POINT_CSWAP
ret = 1;
err:
EC_POINT_free(s);
BN_CTX_end(ctx);
BN_CTX_free(new_ctx);
return ret;
} | 0 |
Android | b499389da21d89d32deff500376c5ee4f8f0b04c | NOT_APPLICABLE | NOT_APPLICABLE | FLAC_API FLAC__bool FLAC__stream_decoder_skip_single_frame(FLAC__StreamDecoder *decoder)
{
FLAC__bool got_a_frame;
FLAC__ASSERT(0 != decoder);
FLAC__ASSERT(0 != decoder->protected_);
while(1) {
switch(decoder->protected_->state) {
case FLAC__STREAM_DECODER_SEARCH_FOR_METADATA:
case FLAC__STREAM_DECODER_READ_METADATA:
return false; /* above function sets the status for us */
case FLAC__STREAM_DECODER_SEARCH_FOR_FRAME_SYNC:
if(!frame_sync_(decoder))
return true; /* above function sets the status for us */
break;
case FLAC__STREAM_DECODER_READ_FRAME:
if(!read_frame_(decoder, &got_a_frame, /*do_full_decode=*/false))
return false; /* above function sets the status for us */
if(got_a_frame)
return true; /* above function sets the status for us */
break;
case FLAC__STREAM_DECODER_END_OF_STREAM:
case FLAC__STREAM_DECODER_ABORTED:
return true;
default:
FLAC__ASSERT(0);
return false;
}
}
}
| 0 |
openssl | 98ece4eebfb6cd45cc8d550c6ac0022965071afc | NOT_APPLICABLE | NOT_APPLICABLE | int ssl_cipher_list_to_bytes(SSL *s, STACK_OF(SSL_CIPHER) *sk,
unsigned char *p,
int (*put_cb) (const SSL_CIPHER *,
unsigned char *))
{
int i, j = 0;
SSL_CIPHER *c;
unsigned char *q;
int empty_reneg_info_scsv = !s->renegotiate;
/* Set disabled masks for this session */
ssl_set_client_disabled(s);
if (sk == NULL)
return (0);
q = p;
if (put_cb == NULL)
put_cb = s->method->put_cipher_by_char;
for (i = 0; i < sk_SSL_CIPHER_num(sk); i++) {
c = sk_SSL_CIPHER_value(sk, i);
/* Skip disabled ciphers */
if (ssl_cipher_disabled(s, c, SSL_SECOP_CIPHER_SUPPORTED))
continue;
#ifdef OPENSSL_SSL_DEBUG_BROKEN_PROTOCOL
if (c->id == SSL3_CK_SCSV) {
if (!empty_reneg_info_scsv)
continue;
else
empty_reneg_info_scsv = 0;
}
#endif
j = put_cb(c, p);
p += j;
}
/*
* If p == q, no ciphers; caller indicates an error. Otherwise, add
* applicable SCSVs.
*/
if (p != q) {
if (empty_reneg_info_scsv) {
static SSL_CIPHER scsv = {
0, NULL, SSL3_CK_SCSV, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
j = put_cb(&scsv, p);
p += j;
#ifdef OPENSSL_RI_DEBUG
fprintf(stderr,
"TLS_EMPTY_RENEGOTIATION_INFO_SCSV sent by client\n");
#endif
}
if (s->mode & SSL_MODE_SEND_FALLBACK_SCSV) {
static SSL_CIPHER scsv = {
0, NULL, SSL3_CK_FALLBACK_SCSV, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
j = put_cb(&scsv, p);
p += j;
}
}
return (p - q);
}
| 0 |
tcpdump | b20e1639dbac84b3fcb393858521c13ad47a9d70 | CVE-2017-13026 | CWE-125 | isis_print_mt_capability_subtlv(netdissect_options *ndo,
const uint8_t *tptr, int len)
{
int stlv_type, stlv_len, tmp;
while (len > 2)
{
stlv_type = *(tptr++);
stlv_len = *(tptr++);
/* first lets see if we know the subTLVs name*/
ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u",
tok2str(isis_mt_capability_subtlv_values, "unknown", stlv_type),
stlv_type,
stlv_len));
len = len - 2;
switch (stlv_type)
{
case ISIS_SUBTLV_SPB_INSTANCE:
ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN);
ND_PRINT((ndo, "\n\t CIST Root-ID: %08x", EXTRACT_32BITS(tptr)));
tptr = tptr+4;
ND_PRINT((ndo, " %08x", EXTRACT_32BITS(tptr)));
tptr = tptr+4;
ND_PRINT((ndo, ", Path Cost: %08x", EXTRACT_32BITS(tptr)));
tptr = tptr+4;
ND_PRINT((ndo, ", Prio: %d", EXTRACT_16BITS(tptr)));
tptr = tptr + 2;
ND_PRINT((ndo, "\n\t RES: %d",
EXTRACT_16BITS(tptr) >> 5));
ND_PRINT((ndo, ", V: %d",
(EXTRACT_16BITS(tptr) >> 4) & 0x0001));
ND_PRINT((ndo, ", SPSource-ID: %d",
(EXTRACT_32BITS(tptr) & 0x000fffff)));
tptr = tptr+4;
ND_PRINT((ndo, ", No of Trees: %x", *(tptr)));
tmp = *(tptr++);
len = len - ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN;
while (tmp)
{
ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN);
ND_PRINT((ndo, "\n\t U:%d, M:%d, A:%d, RES:%d",
*(tptr) >> 7, (*(tptr) >> 6) & 0x01,
(*(tptr) >> 5) & 0x01, (*(tptr) & 0x1f)));
tptr++;
ND_PRINT((ndo, ", ECT: %08x", EXTRACT_32BITS(tptr)));
tptr = tptr + 4;
ND_PRINT((ndo, ", BVID: %d, SPVID: %d",
(EXTRACT_24BITS(tptr) >> 12) & 0x000fff,
EXTRACT_24BITS(tptr) & 0x000fff));
tptr = tptr + 3;
len = len - ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN;
tmp--;
}
break;
case ISIS_SUBTLV_SPBM_SI:
ND_TCHECK2(*tptr, 8);
ND_PRINT((ndo, "\n\t BMAC: %08x", EXTRACT_32BITS(tptr)));
tptr = tptr+4;
ND_PRINT((ndo, "%04x", EXTRACT_16BITS(tptr)));
tptr = tptr+2;
ND_PRINT((ndo, ", RES: %d, VID: %d", EXTRACT_16BITS(tptr) >> 12,
(EXTRACT_16BITS(tptr)) & 0x0fff));
tptr = tptr+2;
len = len - 8;
stlv_len = stlv_len - 8;
while (stlv_len >= 4) {
ND_TCHECK2(*tptr, 4);
ND_PRINT((ndo, "\n\t T: %d, R: %d, RES: %d, ISID: %d",
(EXTRACT_32BITS(tptr) >> 31),
(EXTRACT_32BITS(tptr) >> 30) & 0x01,
(EXTRACT_32BITS(tptr) >> 24) & 0x03f,
(EXTRACT_32BITS(tptr)) & 0x0ffffff));
tptr = tptr + 4;
len = len - 4;
stlv_len = stlv_len - 4;
}
break;
default:
break;
}
}
return 0;
trunc:
ND_PRINT((ndo, "\n\t\t"));
ND_PRINT((ndo, "%s", tstr));
return(1);
}
| 1 |
ImageMagick | 8c35502217c1879cb8257c617007282eee3fe1cc | NOT_APPLICABLE | NOT_APPLICABLE | void Magick::Image::transformReset(void)
{
modifyImage();
options()->transformReset();
} | 0 |
Chrome | 2bfb2b8299e2fb6a432390a93a99a85fed1d29c9 | NOT_APPLICABLE | NOT_APPLICABLE | void WebProcessProxy::pagePreferencesChanged(WebKit::WebPageProxy *page)
{
#if PLATFORM(MAC)
if (pageIsProcessSuppressible(page))
m_processSuppressiblePages.add(page->pageID());
else
m_processSuppressiblePages.remove(page->pageID());
updateProcessSuppressionState();
#else
UNUSED_PARAM(page);
#endif
}
| 0 |
minisphere | 252c1ca184cb38e1acb917aa0e451c5f08519996 | NOT_APPLICABLE | NOT_APPLICABLE | person_get_offset(const person_t* person)
{
return mk_point2(person->x_offset, person->y_offset);
}
| 0 |
openssh-portable | e04fd6dde16de1cdc5a4d9946397ff60d96568db | NOT_APPLICABLE | NOT_APPLICABLE | process_lock_agent(SocketEntry *e, int lock)
{
int r, success = 0, delay;
char *passwd;
u_char passwdhash[LOCK_SIZE];
static u_int fail_count = 0;
size_t pwlen;
debug2_f("entering");
/*
* This is deliberately fatal: the user has requested that we lock,
* but we can't parse their request properly. The only safe thing to
* do is abort.
*/
if ((r = sshbuf_get_cstring(e->request, &passwd, &pwlen)) != 0)
fatal_fr(r, "parse");
if (pwlen == 0) {
debug("empty password not supported");
} else if (locked && !lock) {
if (bcrypt_pbkdf(passwd, pwlen, lock_salt, sizeof(lock_salt),
passwdhash, sizeof(passwdhash), LOCK_ROUNDS) < 0)
fatal("bcrypt_pbkdf");
if (timingsafe_bcmp(passwdhash, lock_pwhash, LOCK_SIZE) == 0) {
debug("agent unlocked");
locked = 0;
fail_count = 0;
explicit_bzero(lock_pwhash, sizeof(lock_pwhash));
success = 1;
} else {
/* delay in 0.1s increments up to 10s */
if (fail_count < 100)
fail_count++;
delay = 100000 * fail_count;
debug("unlock failed, delaying %0.1lf seconds",
(double)delay/1000000);
usleep(delay);
}
explicit_bzero(passwdhash, sizeof(passwdhash));
} else if (!locked && lock) {
debug("agent locked");
locked = 1;
arc4random_buf(lock_salt, sizeof(lock_salt));
if (bcrypt_pbkdf(passwd, pwlen, lock_salt, sizeof(lock_salt),
lock_pwhash, sizeof(lock_pwhash), LOCK_ROUNDS) < 0)
fatal("bcrypt_pbkdf");
success = 1;
}
freezero(passwd, pwlen);
send_status(e, success);
} | 0 |
samba | 3e8d6e681f8dbe79e4595549f78c42649b3573a2 | NOT_APPLICABLE | NOT_APPLICABLE | static int ldapsrv_add_with_controls(struct ldapsrv_call *call,
const struct ldb_message *message,
struct ldb_control **controls,
struct ldb_result *res)
{
struct ldb_context *ldb = call->conn->ldb;
struct ldb_request *req;
int ret;
ret = ldb_msg_sanity_check(ldb, message);
if (ret != LDB_SUCCESS) {
return ret;
}
ret = ldb_build_add_req(&req, ldb, ldb,
message,
controls,
res,
ldb_modify_default_callback,
NULL);
if (ret != LDB_SUCCESS) return ret;
if (call->conn->global_catalog) {
return ldb_error(ldb, LDB_ERR_UNWILLING_TO_PERFORM, "modify forbidden on global catalog port");
}
ldb_request_add_control(req, DSDB_CONTROL_NO_GLOBAL_CATALOG, false, NULL);
ret = ldb_transaction_start(ldb);
if (ret != LDB_SUCCESS) {
return ret;
}
if (!call->conn->is_privileged) {
ldb_req_mark_untrusted(req);
}
LDB_REQ_SET_LOCATION(req);
ret = ldb_request(ldb, req);
if (ret == LDB_SUCCESS) {
ret = ldb_wait(req->handle, LDB_WAIT_ALL);
}
if (ret == LDB_SUCCESS) {
ret = ldb_transaction_commit(ldb);
}
else {
ldb_transaction_cancel(ldb);
}
talloc_free(req);
return ret;
} | 0 |
OpenSC | 7114fb71b54ddfe06ce5dfdab013f4c38f129d14 | NOT_APPLICABLE | NOT_APPLICABLE | coolkey_make_public_key(sc_card_t *card, sc_cardctl_coolkey_object_t *obj, CK_KEY_TYPE key_type)
{
sc_pkcs15_pubkey_t *key;
int r;
key = calloc(1, sizeof(struct sc_pkcs15_pubkey));
if (!key)
return NULL;
switch (key_type) {
case CKK_RSA:
key->algorithm = SC_ALGORITHM_RSA;
r = coolkey_get_attribute_lv(card, obj, CKA_MODULUS, &key->u.rsa.modulus);
if (r != SC_SUCCESS) {
goto fail;
}
r = coolkey_get_attribute_lv(card, obj, CKA_PUBLIC_EXPONENT, &key->u.rsa.exponent);
if (r != SC_SUCCESS) {
goto fail;
}
break;
case CKK_EC:
key->algorithm = SC_ALGORITHM_EC;
r = coolkey_get_attribute_bytes_alloc(card, obj, CKA_EC_POINT, &key->u.ec.ecpointQ.value, &key->u.ec.ecpointQ.len);
if(r < 0) {
goto fail;
}
r = coolkey_get_attribute_bytes_alloc(card, obj, CKA_EC_PARAMS,
&key->u.ec.params.der.value, &key->u.ec.params.der.len);
if (r < 0) {
goto fail;
}
r = sc_pkcs15_fix_ec_parameters(card->ctx, &key->u.ec.params);
if (r < 0) {
goto fail;
}
break;
}
return key;
fail:
sc_pkcs15_free_pubkey(key);
/* now parse the DER cert */
return NULL;
} | 0 |
spice | 95a0cfac8a1c8eff50f05e65df945da3bb501fc9 | NOT_APPLICABLE | NOT_APPLICABLE | RedStream *red_stream_new(RedsState *reds, int socket)
{
RedStream *stream;
stream = (RedStream*) g_malloc0(sizeof(RedStream) + sizeof(RedStreamPrivate));
stream->priv = (RedStreamPrivate *)(stream+1);
stream->priv->info = g_new0(SpiceChannelEventInfo, 1);
stream->priv->reds = reds;
stream->priv->core = reds_get_core_interface(reds);
red_stream_set_socket(stream, socket);
stream->priv->read = stream_read_cb;
stream->priv->write = stream_write_cb;
stream->priv->writev = stream_writev_cb;
return stream;
} | 0 |
linux | 412b65d15a7f8a93794653968308fc100f2aa87c | NOT_APPLICABLE | NOT_APPLICABLE | void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM])
{
u32 key_item;
for (key_item = 0; key_item < HNS_PPEV2_RSS_KEY_NUM; key_item++)
dsaf_write_dev(ppe_cb, PPEV2_RSS_KEY_REG + key_item * 0x4,
rss_key[key_item]);
}
| 0 |
Chrome | 4e4c9b553ae124ed9bb60356e2ecff9106abddd0 | CVE-2016-5187 | CWE-20 | void RemoveActionCallback(const ActionCallback& callback) {
DCHECK(g_task_runner.Get());
DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
std::vector<ActionCallback>* callbacks = g_callbacks.Pointer();
for (size_t i = 0; i < callbacks->size(); ++i) {
if ((*callbacks)[i].Equals(callback)) {
callbacks->erase(callbacks->begin() + i);
return;
}
}
}
| 1 |
server | eef21014898d61e77890359d6546d4985d829ef6 | NOT_APPLICABLE | NOT_APPLICABLE | static void test_fetch_seek()
{
MYSQL_STMT *stmt;
MYSQL_BIND my_bind[3];
MYSQL_ROW_OFFSET row;
int rc;
int32 c1;
char c2[11], c3[20];
myheader("test_fetch_seek");
rc= mysql_query(mysql, "drop table if exists t1");
myquery(rc);
rc= mysql_query(mysql, "create table t1(c1 int primary key auto_increment, c2 char(10), c3 timestamp)");
myquery(rc);
rc= mysql_query(mysql, "insert into t1(c2) values('venu'), ('mysql'), ('open'), ('source')");
myquery(rc);
stmt= mysql_simple_prepare(mysql, "select * from t1");
check_stmt(stmt);
bzero((char*) my_bind, sizeof(my_bind));
my_bind[0].buffer_type= MYSQL_TYPE_LONG;
my_bind[0].buffer= (void *)&c1;
my_bind[1].buffer_type= MYSQL_TYPE_STRING;
my_bind[1].buffer= (void *)c2;
my_bind[1].buffer_length= sizeof(c2);
my_bind[2]= my_bind[1];
my_bind[2].buffer= (void *)c3;
my_bind[2].buffer_length= sizeof(c3);
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_bind_result(stmt, my_bind);
check_execute(stmt, rc);
rc= mysql_stmt_store_result(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n row 0: %ld, %s, %s", (long) c1, c2, c3);
row= mysql_stmt_row_tell(stmt);
row= mysql_stmt_row_seek(stmt, row);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n row 2: %ld, %s, %s", (long) c1, c2, c3);
row= mysql_stmt_row_seek(stmt, row);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n row 2: %ld, %s, %s", (long) c1, c2, c3);
mysql_stmt_data_seek(stmt, 0);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
if (!opt_silent)
fprintf(stdout, "\n row 0: %ld, %s, %s", (long) c1, c2, c3);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
check_execute(stmt, rc);
rc= mysql_stmt_fetch(stmt);
DIE_UNLESS(rc == MYSQL_NO_DATA);
mysql_stmt_close(stmt);
myquery(mysql_query(mysql, "drop table t1"));
} | 0 |
Chrome | 3eb1f512d8646db3a70aaef108a8f5ad8b3f013d | NOT_APPLICABLE | NOT_APPLICABLE | bool CSSStyleSheet::parseString(const String &string, bool strict)
{
setStrictParsing(strict);
CSSParser p(strict);
p.parseSheet(this, string);
return true;
}
| 0 |
linux | 8176cced706b5e5d15887584150764894e94e02f | NOT_APPLICABLE | NOT_APPLICABLE | static u64 perf_swevent_set_period(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
u64 period = hwc->last_period;
u64 nr, offset;
s64 old, val;
hwc->last_period = hwc->sample_period;
again:
old = val = local64_read(&hwc->period_left);
if (val < 0)
return 0;
nr = div64_u64(period + val, period);
offset = nr * period;
val -= offset;
if (local64_cmpxchg(&hwc->period_left, old, val) != old)
goto again;
return nr;
}
| 0 |
kde | 82fdfd24d46966a117fa625b68784735a40f9065 | NOT_APPLICABLE | NOT_APPLICABLE | void Part::slotQuickExtractFiles(QAction *triggeredAction)
{
if (!triggeredAction->data().isNull()) {
QString userDestination = triggeredAction->data().toString();
QString finalDestinationDirectory;
const QString detectedSubfolder = detectSubfolder();
qCDebug(ARK) << "Detected subfolder" << detectedSubfolder;
if (!isSingleFolderArchive()) {
if (!userDestination.endsWith(QDir::separator())) {
userDestination.append(QDir::separator());
}
finalDestinationDirectory = userDestination + detectedSubfolder;
QDir(userDestination).mkdir(detectedSubfolder);
} else {
finalDestinationDirectory = userDestination;
}
qCDebug(ARK) << "Extracting to:" << finalDestinationDirectory;
ExtractJob *job = m_model->extractFiles(filesAndRootNodesForIndexes(addChildren(m_view->selectionModel()->selectedRows())), finalDestinationDirectory, ExtractionOptions());
registerJob(job);
connect(job, &KJob::result,
this, &Part::slotExtractionDone);
job->start();
}
}
| 0 |
linux | 04f5866e41fb70690e28397487d8bd8eea7d712a | NOT_APPLICABLE | NOT_APPLICABLE | static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
__u64 element, __u64 event,
struct list_head *obj_list,
u32 *counter)
{
struct ib_uverbs_event *entry;
unsigned long flags;
spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
if (file->async_file->ev_queue.is_closed) {
spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
return;
}
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry) {
spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
return;
}
entry->desc.async.element = element;
entry->desc.async.event_type = event;
entry->desc.async.reserved = 0;
entry->counter = counter;
list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
if (obj_list)
list_add_tail(&entry->obj_list, obj_list);
spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}
| 0 |
pupnp | c805c1de1141cb22f74c0d94dd5664bda37398e0 | NOT_APPLICABLE | NOT_APPLICABLE | void printService(service_info *service, Upnp_LogLevel level, Dbg_Module module)
{
if (service) {
if (service->serviceType) {
UpnpPrintf(level,
module,
__FILE__,
__LINE__,
"serviceType: %s\n",
service->serviceType);
}
if (service->serviceId) {
UpnpPrintf(level,
module,
__FILE__,
__LINE__,
"serviceId: %s\n",
service->serviceId);
}
if (service->SCPDURL) {
UpnpPrintf(level,
module,
__FILE__,
__LINE__,
"SCPDURL: %s\n",
service->SCPDURL);
}
if (service->controlURL) {
UpnpPrintf(level,
module,
__FILE__,
__LINE__,
"controlURL: %s\n",
service->controlURL);
}
if (service->eventURL) {
UpnpPrintf(level,
module,
__FILE__,
__LINE__,
"eventURL: %s\n",
service->eventURL);
}
if (service->UDN) {
UpnpPrintf(level,
module,
__FILE__,
__LINE__,
"UDN: %s\n\n",
service->UDN);
}
if (service->active) {
UpnpPrintf(level,
module,
__FILE__,
__LINE__,
"Service is active\n");
} else {
UpnpPrintf(level,
module,
__FILE__,
__LINE__,
"Service is inactive\n");
}
}
} | 0 |
fmt | 8cf30aa2be256eba07bb1cefb998c52326e846e7 | NOT_APPLICABLE | NOT_APPLICABLE | char type() const { return TYPE; } | 0 |
Chrome | 79cfdeb5fbe79fa2604d37fba467f371cb436bc3 | CVE-2013-2885 | CWE-399 | void BaseMultipleFieldsDateAndTimeInputType::didBlurFromControl()
{
RefPtr<HTMLInputElement> protector(element());
element()->setFocus(false);
}
| 1 |
yara | 3119b232c9c453c98d8fa8b6ae4e37ba18117cd4 | NOT_APPLICABLE | NOT_APPLICABLE | static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner)
{
struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
(void)yyg;
(void) fprintf( stderr, "%s\n", msg );
exit( YY_EXIT_FAILURE );
}
| 0 |
php-src | feba44546c27b0158f9ac20e72040a224b918c75 | NOT_APPLICABLE | NOT_APPLICABLE | gdImagePolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
int i;
int lx, ly;
typedef void (*image_line)(gdImagePtr im, int x1, int y1, int x2, int y2, int color);
image_line draw_line;
if (!n) {
return;
}
if ( im->antialias) {
draw_line = gdImageAALine;
} else {
draw_line = gdImageLine;
}
lx = p->x;
ly = p->y;
draw_line(im, lx, ly, p[n - 1].x, p[n - 1].y, c);
for (i = 1; (i < n); i++) {
p++;
draw_line(im, lx, ly, p->x, p->y, c);
lx = p->x;
ly = p->y;
}
} | 0 |
linux | 44afb3a04391a74309d16180d1e4f8386fdfa745 | NOT_APPLICABLE | NOT_APPLICABLE | i915_gem_execbuffer_flush(struct drm_device *dev,
uint32_t invalidate_domains,
uint32_t flush_domains,
uint32_t flush_rings)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i, ret;
if (flush_domains & I915_GEM_DOMAIN_CPU)
intel_gtt_chipset_flush();
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
for (i = 0; i < I915_NUM_RINGS; i++)
if (flush_rings & (1 << i)) {
ret = i915_gem_flush_ring(&dev_priv->ring[i],
invalidate_domains,
flush_domains);
if (ret)
return ret;
}
}
return 0;
}
| 0 |
Chrome | 83a4b3aa72d98fe4176b4a54c8cea227ed966570 | NOT_APPLICABLE | NOT_APPLICABLE | void ModuleSystem::OnModuleLoaded(
scoped_ptr<v8::Global<v8::Promise::Resolver>> resolver,
v8::Local<v8::Value> value) {
if (!is_valid())
return;
v8::HandleScope handle_scope(GetIsolate());
v8::Local<v8::Promise::Resolver> resolver_local(
v8::Local<v8::Promise::Resolver>::New(GetIsolate(), *resolver));
resolver_local->Resolve(context()->v8_context(), value);
}
| 0 |
linux | e93b7d748be887cd7639b113ba7d7ef792a7efb9 | NOT_APPLICABLE | NOT_APPLICABLE | int ip_send_skb(struct net *net, struct sk_buff *skb)
{
int err;
err = ip_local_out(skb);
if (err) {
if (err > 0)
err = net_xmit_errno(err);
if (err)
IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
}
return err;
}
| 0 |
php-src | 20ce2fe8e3c211a42fee05a461a5881be9a8790e | NOT_APPLICABLE | NOT_APPLICABLE | static inline void var_push(php_unserialize_data_t *var_hashx, zval *rval)
{
var_entries *var_hash = (*var_hashx)->last;
#if VAR_ENTRIES_DBG
fprintf(stderr, "var_push(%ld): %d\n", var_hash?var_hash->used_slots:-1L, Z_TYPE_P(rval));
#endif
if (!var_hash || var_hash->used_slots == VAR_ENTRIES_MAX) {
var_hash = emalloc(sizeof(var_entries));
var_hash->used_slots = 0;
var_hash->next = 0;
if (!(*var_hashx)->first) {
(*var_hashx)->first = var_hash;
} else {
((var_entries *) (*var_hashx)->last)->next = var_hash;
}
(*var_hashx)->last = var_hash;
}
var_hash->data[var_hash->used_slots++] = rval;
} | 0 |
nedmalloc | 2965eca30c408c13473c4146a9d47d547d288db1 | NOT_APPLICABLE | NOT_APPLICABLE | static NOINLINE int InitPool(nedpool *RESTRICT p, size_t capacity, int threads) THROWSPEC
{ /* threads is -1 for system pool */
ensure_initialization();
ACQUIRE_MALLOC_GLOBAL_LOCK();
if(p->threads) goto done;
#if USE_LOCKS
if(INITIAL_LOCK(&p->mutex)) goto err;
#endif
if(TLSALLOC(&p->mycache)) goto err;
#if USE_ALLOCATOR==0
p->m[0]=(mstate) mspacecounter++;
#elif USE_ALLOCATOR==1
if(!(p->m[0]=(mstate) create_mspace(capacity, 1))) goto err;
p->m[0]->extp=p;
#endif
p->threads=(threads>MAXTHREADSINPOOL) ? MAXTHREADSINPOOL : (!threads) ? DEFAULTMAXTHREADSINPOOL : threads;
done:
RELEASE_MALLOC_GLOBAL_LOCK();
return 1;
err:
if(threads<0)
abort(); /* If you can't allocate for system pool, we're screwed */
DestroyCaches(p);
if(p->m[0])
{
#if USE_ALLOCATOR==1
destroy_mspace(p->m[0]);
#endif
p->m[0]=0;
}
if(p->mycache)
{
if(TLSFREE(p->mycache)) abort();
p->mycache=0;
}
RELEASE_MALLOC_GLOBAL_LOCK();
return 0;
}
| 0 |
openssl | ce325c60c74b0fa784f5872404b722e120e5cab0 | NOT_APPLICABLE | NOT_APPLICABLE | int ssl3_get_server_hello(SSL *s)
{
STACK_OF(SSL_CIPHER) *sk;
const SSL_CIPHER *c;
CERT *ct = s->cert;
unsigned char *p,*d;
int i,al=SSL_AD_INTERNAL_ERROR,ok;
unsigned int j;
long n;
#ifndef OPENSSL_NO_COMP
SSL_COMP *comp;
#endif
/* Hello verify request and/or server hello version may not
* match so set first packet if we're negotiating version.
*/
if (SSL_IS_DTLS(s))
s->first_packet = 1;
n=s->method->ssl_get_message(s,
SSL3_ST_CR_SRVR_HELLO_A,
SSL3_ST_CR_SRVR_HELLO_B,
-1,
20000, /* ?? */
&ok);
if (!ok) return((int)n);
if (SSL_IS_DTLS(s))
{
s->first_packet = 0;
if ( s->s3->tmp.message_type == DTLS1_MT_HELLO_VERIFY_REQUEST)
{
if ( s->d1->send_cookie == 0)
{
s->s3->tmp.reuse_message = 1;
return 1;
}
else /* already sent a cookie */
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_BAD_MESSAGE_TYPE);
goto f_err;
}
}
}
if ( s->s3->tmp.message_type != SSL3_MT_SERVER_HELLO)
{
al=SSL_AD_UNEXPECTED_MESSAGE;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_BAD_MESSAGE_TYPE);
goto f_err;
}
d=p=(unsigned char *)s->init_msg;
if (s->method->version == DTLS_ANY_VERSION)
{
/* Work out correct protocol version to use */
int hversion = (p[0] << 8)|p[1];
int options = s->options;
if (hversion == DTLS1_2_VERSION
&& !(options & SSL_OP_NO_DTLSv1_2))
s->method = DTLSv1_2_client_method();
else if (tls1_suiteb(s))
{
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO, SSL_R_ONLY_DTLS_1_2_ALLOWED_IN_SUITEB_MODE);
s->version = hversion;
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
else if (hversion == DTLS1_VERSION
&& !(options & SSL_OP_NO_DTLSv1))
s->method = DTLSv1_client_method();
else
{
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_WRONG_SSL_VERSION);
s->version = hversion;
al = SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
s->version = s->method->version;
}
if ((p[0] != (s->version>>8)) || (p[1] != (s->version&0xff)))
{
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_WRONG_SSL_VERSION);
s->version=(s->version&0xff00)|p[1];
al=SSL_AD_PROTOCOL_VERSION;
goto f_err;
}
p+=2;
/* load the server hello data */
/* load the server random */
memcpy(s->s3->server_random,p,SSL3_RANDOM_SIZE);
p+=SSL3_RANDOM_SIZE;
s->hit = 0;
/* get the session-id */
j= *(p++);
if ((j > sizeof s->session->session_id) || (j > SSL3_SESSION_ID_SIZE))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_SSL3_SESSION_ID_TOO_LONG);
goto f_err;
}
#ifndef OPENSSL_NO_TLSEXT
/* check if we want to resume the session based on external pre-shared secret */
if (s->version >= TLS1_VERSION && s->tls_session_secret_cb)
{
SSL_CIPHER *pref_cipher=NULL;
s->session->master_key_length=sizeof(s->session->master_key);
if (s->tls_session_secret_cb(s, s->session->master_key,
&s->session->master_key_length,
NULL, &pref_cipher,
s->tls_session_secret_cb_arg))
{
s->session->cipher = pref_cipher ?
pref_cipher : ssl_get_cipher_by_char(s, p+j);
s->hit = 1;
}
}
#endif /* OPENSSL_NO_TLSEXT */
if (!s->hit && j != 0 && j == s->session->session_id_length
&& memcmp(p,s->session->session_id,j) == 0)
{
if(s->sid_ctx_length != s->session->sid_ctx_length
|| memcmp(s->session->sid_ctx,s->sid_ctx,s->sid_ctx_length))
{
/* actually a client application bug */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT);
goto f_err;
}
s->hit=1;
}
/* a miss or crap from the other end */
if (!s->hit)
{
/* If we were trying for session-id reuse, make a new
* SSL_SESSION so we don't stuff up other people */
if (s->session->session_id_length > 0)
{
if (!ssl_get_new_session(s,0))
{
goto f_err;
}
}
s->session->session_id_length=j;
memcpy(s->session->session_id,p,j); /* j could be 0 */
}
p+=j;
c=ssl_get_cipher_by_char(s,p);
if (c == NULL)
{
/* unknown cipher */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_UNKNOWN_CIPHER_RETURNED);
goto f_err;
}
/* Set version disabled mask now we know version */
if (!SSL_USE_TLS1_2_CIPHERS(s))
ct->mask_ssl = SSL_TLSV1_2;
else
ct->mask_ssl = 0;
/* If it is a disabled cipher we didn't send it in client hello,
* so return an error.
*/
if (ssl_cipher_disabled(s, c, SSL_SECOP_CIPHER_CHECK))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_WRONG_CIPHER_RETURNED);
goto f_err;
}
p+=ssl_put_cipher_by_char(s,NULL,NULL);
sk=ssl_get_ciphers_by_id(s);
i=sk_SSL_CIPHER_find(sk,c);
if (i < 0)
{
/* we did not say we would use this cipher */
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_WRONG_CIPHER_RETURNED);
goto f_err;
}
/* Depending on the session caching (internal/external), the cipher
and/or cipher_id values may not be set. Make sure that
cipher_id is set and use it for comparison. */
if (s->session->cipher)
s->session->cipher_id = s->session->cipher->id;
if (s->hit && (s->session->cipher_id != c->id))
{
/* Workaround is now obsolete */
#if 0
if (!(s->options &
SSL_OP_NETSCAPE_REUSE_CIPHER_CHANGE_BUG))
#endif
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED);
goto f_err;
}
}
s->s3->tmp.new_cipher=c;
/* Don't digest cached records if no sigalgs: we may need them for
* client authentication.
*/
if (!SSL_USE_SIGALGS(s) && !ssl3_digest_cached_records(s))
goto f_err;
/* lets get the compression algorithm */
/* COMPRESSION */
#ifdef OPENSSL_NO_COMP
if (*(p++) != 0)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM);
goto f_err;
}
/* If compression is disabled we'd better not try to resume a session
* using compression.
*/
if (s->session->compress_meth != 0)
{
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_INCONSISTENT_COMPRESSION);
goto f_err;
}
#else
j= *(p++);
if (s->hit && j != s->session->compress_meth)
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED);
goto f_err;
}
if (j == 0)
comp=NULL;
else if (!ssl_allow_compression(s))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_COMPRESSION_DISABLED);
goto f_err;
}
else
comp=ssl3_comp_find(s->ctx->comp_methods,j);
if ((j != 0) && (comp == NULL))
{
al=SSL_AD_ILLEGAL_PARAMETER;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM);
goto f_err;
}
else
{
s->s3->tmp.new_compression=comp;
}
#endif
#ifndef OPENSSL_NO_TLSEXT
/* TLS extensions*/
if (!ssl_parse_serverhello_tlsext(s,&p,d,n))
{
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_PARSE_TLSEXT);
goto err;
}
#endif
if (p != (d+n))
{
/* wrong packet length */
al=SSL_AD_DECODE_ERROR;
SSLerr(SSL_F_SSL3_GET_SERVER_HELLO,SSL_R_BAD_PACKET_LENGTH);
goto f_err;
}
return(1);
f_err:
ssl3_send_alert(s,SSL3_AL_FATAL,al);
err:
return(-1);
}
| 0 |
mod_wsgi | 545354a80b9cc20d8b6916ca30542eab36c3b8bd | NOT_APPLICABLE | NOT_APPLICABLE | static int Adapter_output(AdapterObject *self, const char *data,
apr_off_t length, PyObject *string_object,
int exception_when_aborted)
{
int i = 0;
apr_status_t rv;
request_rec *r;
#if defined(MOD_WSGI_WITH_DAEMONS)
if (wsgi_idle_timeout) {
apr_thread_mutex_lock(wsgi_monitor_lock);
if (wsgi_idle_timeout) {
wsgi_idle_shutdown_time = apr_time_now();
wsgi_idle_shutdown_time += wsgi_idle_timeout;
}
apr_thread_mutex_unlock(wsgi_monitor_lock);
}
#endif
if (!self->status_line) {
PyErr_SetString(PyExc_RuntimeError, "response has not been started");
return 0;
}
r = self->r;
/* Have response headers yet been sent. */
if (self->headers) {
/*
* Apache prior to Apache 2.2.8 has a bug in it
* whereby it doesn't force '100 Continue'
* response before responding with headers if no
* read. So, force a zero length read before
* sending the headers if haven't yet attempted
* to read anything. This will ensure that if no
* request content has been read that any '100
* Continue' response will be flushed and sent
* back to the client if client was expecting
* one. Only want to do this for 2xx and 3xx
* status values. Note that even though Apple
* supplied version of Apache on MacOS X Leopard
* is newer than version 2.2.8, the header file
* has never been patched when they make updates
* and so anything compiled against it thinks it
* is older.
*/
#if (AP_SERVER_MAJORVERSION_NUMBER == 2 && \
AP_SERVER_MINORVERSION_NUMBER < 2) || \
(AP_SERVER_MAJORVERSION_NUMBER == 2 && \
AP_SERVER_MINORVERSION_NUMBER == 2 && \
AP_SERVER_PATCHLEVEL_NUMBER < 8)
if (!self->input->init) {
if (self->status >= 200 && self->status < 400) {
PyObject *args = NULL;
PyObject *result = NULL;
args = Py_BuildValue("(i)", 0);
result = Input_read(self->input, args);
if (PyErr_Occurred())
PyErr_Clear();
Py_DECREF(args);
Py_XDECREF(result);
}
}
#endif
/*
* Now setup the response headers in request object. We
* have already converted any native strings in the
* headers to byte strings and validated the format of
* the header names and values so can skip all the error
* checking.
*/
r->status = self->status;
r->status_line = self->status_line;
for (i = 0; i < PyList_Size(self->headers); i++) {
PyObject *tuple = NULL;
PyObject *object1 = NULL;
PyObject *object2 = NULL;
char *name = NULL;
char *value = NULL;
tuple = PyList_GetItem(self->headers, i);
object1 = PyTuple_GetItem(tuple, 0);
object2 = PyTuple_GetItem(tuple, 1);
name = PyBytes_AsString(object1);
value = PyBytes_AsString(object2);
if (!strcasecmp(name, "Content-Type")) {
/*
* In a daemon child process we cannot call the
* function ap_set_content_type() as want to
* avoid adding any output filters based on the
* type of file being served as this will be
* done in the main Apache child process which
* proxied the request to the daemon process.
*/
if (*self->config->process_group)
r->content_type = apr_pstrdup(r->pool, value);
else
ap_set_content_type(r, apr_pstrdup(r->pool, value));
}
else if (!strcasecmp(name, "Content-Length")) {
char *v = value;
long l = 0;
errno = 0;
l = strtol(v, &v, 10);
if (*v || errno == ERANGE || l < 0) {
PyErr_SetString(PyExc_ValueError,
"invalid content length");
return 0;
}
ap_set_content_length(r, l);
self->content_length_set = 1;
self->content_length = l;
}
else if (!strcasecmp(name, "WWW-Authenticate")) {
apr_table_add(r->err_headers_out, name, value);
}
else {
apr_table_add(r->headers_out, name, value);
}
}
/*
* Reset flag indicating whether '100 Continue' response
* expected. If we don't do this then if an attempt to read
* input for the first time is after headers have been
* sent, then Apache is wrongly generate the '100 Continue'
* response into the response content. Not sure if this is
* a bug in Apache, or that it truly believes that input
* will never be read after the response headers have been
* sent.
*/
r->expecting_100 = 0;
/* No longer need headers now that they have been sent. */
Py_DECREF(self->headers);
self->headers = NULL;
}
/*
* If content length was specified, ensure that we don't
* actually output more data than was specified as being
* sent as otherwise technically in violation of HTTP RFC.
*/
if (length) {
apr_off_t output_length = length;
if (self->content_length_set) {
if (self->output_length < self->content_length) {
if (self->output_length + length > self->content_length) {
length = self->content_length - self->output_length;
}
}
else
length = 0;
}
self->output_length += output_length;
}
/* Now output any data. */
if (length) {
apr_bucket *b;
/*
* When using Apache 2.X can use lower level
* bucket brigade APIs. This is preferred as
* ap_rwrite()/ap_rflush() will grow memory in
* the request pool on each call, which will
* result in an increase in memory use over time
* when streaming of data is being performed.
* The memory is still reclaimed, but only at
* the end of the request. Using bucket brigade
* API avoids this, and also avoids any copying
* of response data due to buffering performed
* by ap_rwrite().
*/
if (r->connection->aborted) {
if (!exception_when_aborted) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, self->r,
"mod_wsgi (pid=%d): Client closed connection.",
getpid());
}
else
PyErr_SetString(PyExc_IOError, "client connection closed");
return 0;
}
if (!self->bb) {
self->bb = apr_brigade_create(r->pool,
r->connection->bucket_alloc);
}
#if 0
if (string_object) {
b = wsgi_apr_bucket_python_create(data, length,
self->config->application_group, string_object,
r->connection->bucket_alloc);
}
else {
#endif
b = apr_bucket_transient_create(data, (apr_size_t)length,
r->connection->bucket_alloc);
#if 0
}
#endif
APR_BRIGADE_INSERT_TAIL(self->bb, b);
b = apr_bucket_flush_create(r->connection->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(self->bb, b);
Py_BEGIN_ALLOW_THREADS
rv = ap_pass_brigade(r->output_filters, self->bb);
Py_END_ALLOW_THREADS
if (rv != APR_SUCCESS) {
PyErr_SetString(PyExc_IOError, "failed to write data");
return 0;
}
Py_BEGIN_ALLOW_THREADS
apr_brigade_cleanup(self->bb);
Py_END_ALLOW_THREADS
}
/*
* Check whether aborted connection was found when data
* being written, otherwise will not be flagged until next
* time that data is being written. Early detection is
* better as it may have been the last data block being
* written and application may think that data has all
* been written. In a streaming application, we also want
* to avoid any additional data processing to generate any
* successive data.
*/
if (r->connection->aborted) {
if (!exception_when_aborted) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, self->r,
"mod_wsgi (pid=%d): Client closed connection.",
getpid());
}
else
PyErr_SetString(PyExc_IOError, "client connection closed");
return 0;
}
return 1;
} | 0 |
qemu | defac5e2fbddf8423a354ff0454283a2115e1367 | NOT_APPLICABLE | NOT_APPLICABLE | static void fdctrl_format_sector(FDCtrl *fdctrl)
{
FDrive *cur_drv;
uint8_t kh, kt, ks;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
kt = fdctrl->fifo[6];
kh = fdctrl->fifo[7];
ks = fdctrl->fifo[8];
FLOPPY_DPRINTF("format sector at %d %d %02x %02x (%d)\n",
GET_CUR_DRV(fdctrl), kh, kt, ks,
fd_sector_calc(kh, kt, ks, cur_drv->last_sect,
NUM_SIDES(cur_drv)));
switch (fd_seek(cur_drv, kh, kt, ks, fdctrl->config & FD_CONFIG_EIS)) {
case 2:
/* sect too big */
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00);
fdctrl->fifo[3] = kt;
fdctrl->fifo[4] = kh;
fdctrl->fifo[5] = ks;
return;
case 3:
/* track too big */
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_EC, 0x00);
fdctrl->fifo[3] = kt;
fdctrl->fifo[4] = kh;
fdctrl->fifo[5] = ks;
return;
case 4:
/* No seek enabled */
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, 0x00, 0x00);
fdctrl->fifo[3] = kt;
fdctrl->fifo[4] = kh;
fdctrl->fifo[5] = ks;
return;
case 1:
fdctrl->status0 |= FD_SR0_SEEK;
break;
default:
break;
}
memset(fdctrl->fifo, 0, FD_SECTOR_LEN);
if (cur_drv->blk == NULL ||
blk_pwrite(cur_drv->blk, fd_offset(cur_drv), fdctrl->fifo,
BDRV_SECTOR_SIZE, 0) < 0) {
FLOPPY_DPRINTF("error formatting sector %d\n", fd_sector(cur_drv));
fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM | FD_SR0_SEEK, 0x00, 0x00);
} else {
if (cur_drv->sect == cur_drv->last_sect) {
fdctrl->data_state &= ~FD_STATE_FORMAT;
/* Last sector done */
fdctrl_stop_transfer(fdctrl, 0x00, 0x00, 0x00);
} else {
/* More to do */
fdctrl->data_pos = 0;
fdctrl->data_len = 4;
}
}
} | 0 |
linux | b2853fd6c2d0f383dbdf7427e263eb576a633867 | NOT_APPLICABLE | NOT_APPLICABLE | static void cm_add_one(struct ib_device *ib_device)
{
struct cm_device *cm_dev;
struct cm_port *port;
struct ib_mad_reg_req reg_req = {
.mgmt_class = IB_MGMT_CLASS_CM,
.mgmt_class_version = IB_CM_CLASS_VERSION
};
struct ib_port_modify port_modify = {
.set_port_cap_mask = IB_PORT_CM_SUP
};
unsigned long flags;
int ret;
u8 i;
if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
return;
cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
ib_device->phys_port_cnt, GFP_KERNEL);
if (!cm_dev)
return;
cm_dev->ib_device = ib_device;
cm_get_ack_delay(cm_dev);
cm_dev->device = device_create(&cm_class, &ib_device->dev,
MKDEV(0, 0), NULL,
"%s", ib_device->name);
if (IS_ERR(cm_dev->device)) {
kfree(cm_dev);
return;
}
set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
for (i = 1; i <= ib_device->phys_port_cnt; i++) {
port = kzalloc(sizeof *port, GFP_KERNEL);
if (!port)
goto error1;
cm_dev->port[i-1] = port;
port->cm_dev = cm_dev;
port->port_num = i;
ret = cm_create_port_fs(port);
if (ret)
goto error1;
port->mad_agent = ib_register_mad_agent(ib_device, i,
IB_QPT_GSI,
®_req,
0,
cm_send_handler,
cm_recv_handler,
port);
if (IS_ERR(port->mad_agent))
goto error2;
ret = ib_modify_port(ib_device, i, 0, &port_modify);
if (ret)
goto error3;
}
ib_set_client_data(ib_device, &cm_client, cm_dev);
write_lock_irqsave(&cm.device_lock, flags);
list_add_tail(&cm_dev->list, &cm.device_list);
write_unlock_irqrestore(&cm.device_lock, flags);
return;
error3:
ib_unregister_mad_agent(port->mad_agent);
error2:
cm_remove_port_fs(port);
error1:
port_modify.set_port_cap_mask = 0;
port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
while (--i) {
port = cm_dev->port[i-1];
ib_modify_port(ib_device, port->port_num, 0, &port_modify);
ib_unregister_mad_agent(port->mad_agent);
cm_remove_port_fs(port);
}
device_unregister(cm_dev->device);
kfree(cm_dev);
}
| 0 |
htslib | dcd4b7304941a8832fba2d0fc4c1e716e7a4e72c | NOT_APPLICABLE | NOT_APPLICABLE | int bcf_enc_vchar(kstring_t *s, int l, const char *a)
{
bcf_enc_size(s, l, BCF_BT_CHAR);
kputsn(a, l, s);
return 0; // FIXME: check for errs in this function
} | 0 |
linux | 9804501fa1228048857910a6bf23e085aade37cc | NOT_APPLICABLE | NOT_APPLICABLE | __acquires(aarp_lock)
{
struct aarp_iter_state *iter = seq->private;
read_lock_bh(&aarp_lock);
iter->table = resolved;
iter->bucket = 0;
return *pos ? iter_next(iter, pos) : SEQ_START_TOKEN;
} | 0 |
linux | c03aa9f6e1f938618e6db2e23afef0574efeeb65 | NOT_APPLICABLE | NOT_APPLICABLE | static int udf_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
return mpage_writepages(mapping, wbc, udf_get_block);
}
| 0 |
shibboleth | b66cceb0e992c351ad5e2c665229ede82f261b16 | NOT_APPLICABLE | NOT_APPLICABLE | saml2md::EntityDescriptor* DynamicMetadataProvider::resolve(const saml2md::MetadataProvider::Criteria& criteria) const
{
#ifdef _DEBUG
xmltooling::NDC("resolve");
#endif
Category& log=Category::getInstance(SHIBSP_LOGCAT ".MetadataProvider.Dynamic");
string name;
if (criteria.entityID_ascii) {
name = criteria.entityID_ascii;
}
else if (criteria.entityID_unicode) {
auto_ptr_char temp(criteria.entityID_unicode);
name = temp.get();
}
else if (criteria.artifact) {
if (m_subst.empty() && (m_regex.empty() || m_match.empty()))
throw saml2md::MetadataException("Unable to resolve metadata dynamically from an artifact.");
name = "{sha1}" + criteria.artifact->getSource();
}
if (!m_subst.empty()) {
string name2(name);
if (!m_hashed.empty()) {
name2 = SecurityHelper::doHash(m_hashed.c_str(), name.c_str(), name.length());
}
name2 = boost::replace_first_copy(m_subst, "$entityID",
m_encoded ? XMLToolingConfig::getConfig().getURLEncoder()->encode(name2.c_str()) : name2);
log.info("transformed location from (%s) to (%s)", name.c_str(), name2.c_str());
name = name2;
}
else if (!m_match.empty() && !m_regex.empty()) {
try {
RegularExpression exp(m_match.c_str());
XMLCh* temp = exp.replace(name.c_str(), m_regex.c_str());
if (temp) {
auto_ptr_char narrow(temp);
XMLString::release(&temp);
if (name != narrow.get()) {
log.info("transformed location from (%s) to (%s)", name.c_str(), narrow.get());
name = narrow.get();
}
}
}
catch (XMLException& ex) {
auto_ptr_char msg(ex.getMessage());
log.error("caught error applying regular expression: %s", msg.get());
}
}
if (XMLString::startsWithI(name.c_str(), "file://")) {
MetadataProvider::Criteria baseCriteria(name.c_str());
return saml2md::DynamicMetadataProvider::resolve(baseCriteria);
}
const MetadataProviderCriteria* mpc = dynamic_cast<const MetadataProviderCriteria*>(&criteria);
if (!mpc)
throw saml2md::MetadataException("Dynamic MetadataProvider requires Shibboleth-aware lookup criteria, check calling code.");
const PropertySet* relyingParty;
if (criteria.artifact)
relyingParty = mpc->application.getRelyingParty((XMLCh*)nullptr);
else if (criteria.entityID_unicode)
relyingParty = mpc->application.getRelyingParty(criteria.entityID_unicode);
else {
auto_ptr_XMLCh temp2(name.c_str());
relyingParty = mpc->application.getRelyingParty(temp2.get());
}
SOAPTransport::Address addr(relyingParty->getString("entityID").second, name.c_str(), name.c_str());
const char* pch = strchr(addr.m_endpoint,':');
if (!pch)
throw IOException("location was not a URL.");
string scheme(addr.m_endpoint, pch-addr.m_endpoint);
boost::scoped_ptr<SOAPTransport> transport;
try {
transport.reset(XMLToolingConfig::getConfig().SOAPTransportManager.newPlugin(scheme.c_str(), addr));
}
catch (exception& ex) {
log.error("exception while building transport object to resolve URL: %s", ex.what());
throw IOException("Unable to resolve entityID with a known transport protocol.");
}
transport->setVerifyHost(m_verifyHost);
if (m_trust.get() && m_dummyCR.get() && !transport->setTrustEngine(m_trust.get(), m_dummyCR.get()))
throw IOException("Unable to install X509TrustEngine into transport object.");
Locker credlocker(nullptr, false);
CredentialResolver* credResolver = nullptr;
pair<bool,const char*> authType=relyingParty->getString("authType");
if (!authType.first || !strcmp(authType.second,"TLS")) {
credResolver = mpc->application.getCredentialResolver();
if (credResolver)
credlocker.assign(credResolver);
if (credResolver) {
CredentialCriteria cc;
cc.setUsage(Credential::TLS_CREDENTIAL);
authType = relyingParty->getString("keyName");
if (authType.first)
cc.getKeyNames().insert(authType.second);
const Credential* cred = credResolver->resolve(&cc);
cc.getKeyNames().clear();
if (cred) {
if (!transport->setCredential(cred))
log.error("failed to load Credential into metadata resolver");
}
else {
log.error("no TLS credential supplied");
}
}
else {
log.error("no CredentialResolver available for TLS");
}
}
else {
SOAPTransport::transport_auth_t type=SOAPTransport::transport_auth_none;
pair<bool,const char*> username=relyingParty->getString("authUsername");
pair<bool,const char*> password=relyingParty->getString("authPassword");
if (!username.first || !password.first)
log.error("transport authType (%s) specified but authUsername or authPassword was missing", authType.second);
else if (!strcmp(authType.second,"basic"))
type = SOAPTransport::transport_auth_basic;
else if (!strcmp(authType.second,"digest"))
type = SOAPTransport::transport_auth_digest;
else if (!strcmp(authType.second,"ntlm"))
type = SOAPTransport::transport_auth_ntlm;
else if (!strcmp(authType.second,"gss"))
type = SOAPTransport::transport_auth_gss;
else if (strcmp(authType.second,"none"))
log.error("unknown authType (%s) specified for RelyingParty", authType.second);
if (type > SOAPTransport::transport_auth_none) {
if (transport->setAuth(type,username.second,password.second))
log.debug("configured for transport authentication (method=%s, username=%s)", authType.second, username.second);
else
log.error("failed to configure transport authentication (method=%s)", authType.second);
}
}
pair<bool,unsigned int> timeout = relyingParty->getUnsignedInt("connectTimeout");
transport->setConnectTimeout(timeout.first ? timeout.second : 10);
timeout = relyingParty->getUnsignedInt("timeout");
transport->setTimeout(timeout.first ? timeout.second : 20);
mpc->application.getServiceProvider().setTransportOptions(*transport);
HTTPSOAPTransport* http = dynamic_cast<HTTPSOAPTransport*>(transport.get());
if (http) {
pair<bool,bool> flag = relyingParty->getBool("chunkedEncoding");
http->useChunkedEncoding(flag.first && flag.second);
http->setRequestHeader("Xerces-C", XERCES_FULLVERSIONDOT);
http->setRequestHeader("XML-Security-C", XSEC_FULLVERSIONDOT);
http->setRequestHeader("OpenSAML-C", gOpenSAMLDotVersionStr);
http->setRequestHeader(PACKAGE_NAME, PACKAGE_VERSION);
}
try {
transport->send();
istream& msg = transport->receive();
DOMDocument* doc=nullptr;
StreamInputSource src(msg, "DynamicMetadataProvider");
Wrapper4InputSource dsrc(&src,false);
if (m_validate)
doc=XMLToolingConfig::getConfig().getValidatingParser().parse(dsrc);
else
doc=XMLToolingConfig::getConfig().getParser().parse(dsrc);
XercesJanitor<DOMDocument> docjanitor(doc);
if (!doc->getDocumentElement() || !XMLHelper::isNodeNamed(doc->getDocumentElement(),
samlconstants::SAML20MD_NS, saml2md::EntityDescriptor::LOCAL_NAME)) {
throw saml2md::MetadataException("Root of metadata instance was not an EntityDescriptor");
}
auto_ptr<XMLObject> xmlObject(XMLObjectBuilder::buildOneFromElement(doc->getDocumentElement(), true));
docjanitor.release();
saml2md::EntityDescriptor* entity = dynamic_cast<saml2md::EntityDescriptor*>(xmlObject.get());
if (!entity) {
throw saml2md::MetadataException(
"Root of metadata instance not recognized: $1", params(1,xmlObject->getElementQName().toString().c_str())
);
}
xmlObject.release();
return entity;
}
catch (XMLException& e) {
auto_ptr_char msg(e.getMessage());
log.error("Xerces error while resolving location (%s): %s", name.c_str(), msg.get());
throw saml2md::MetadataException(msg.get());
}
}
| 0 |
Chrome | c9d673b54832afde658f214d7da7d0453fa89774 | NOT_APPLICABLE | NOT_APPLICABLE | int MemBackendImpl::CalculateSizeOfAllEntries(
const CompletionCallback& callback) {
return current_size_;
}
| 0 |
Android | dd3ca4d6b81a9ae2ddf358b7b93d2f8c010921f5 | NOT_APPLICABLE | NOT_APPLICABLE | void ID3::Iterator::getstring(String8 *id, bool otherdata) const {
id->setTo("");
const uint8_t *frameData = mFrameData;
if (frameData == NULL) {
return;
}
uint8_t encoding = *frameData;
if (mParent.mVersion == ID3_V1 || mParent.mVersion == ID3_V1_1) {
if (mOffset == 126 || mOffset == 127) {
char tmp[16];
snprintf(tmp, sizeof(tmp), "%d", (int)*frameData);
id->setTo(tmp);
return;
}
id->setTo((const char*)frameData, mFrameSize);
return;
}
if (mFrameSize < getHeaderLength() + 1) {
return;
}
size_t n = mFrameSize - getHeaderLength() - 1;
if (otherdata) {
if (n < 5) {
return;
}
frameData += 4;
int32_t i = n - 4;
while(--i >= 0 && *++frameData != 0) ;
int skipped = (frameData - mFrameData);
if (skipped >= (int)n) {
return;
}
n -= skipped;
}
if (n <= 0) {
return;
}
if (encoding == 0x00) {
id->setTo((const char*)frameData + 1, n);
} else if (encoding == 0x03) {
id->setTo((const char *)(frameData + 1), n);
} else if (encoding == 0x02) {
int len = n / 2;
const char16_t *framedata = (const char16_t *) (frameData + 1);
char16_t *framedatacopy = NULL;
#if BYTE_ORDER == LITTLE_ENDIAN
if (len > 0) {
framedatacopy = new (std::nothrow) char16_t[len];
if (framedatacopy == NULL) {
return;
}
for (int i = 0; i < len; i++) {
framedatacopy[i] = bswap_16(framedata[i]);
}
framedata = framedatacopy;
}
#endif
id->setTo(framedata, len);
if (framedatacopy != NULL) {
delete[] framedatacopy;
}
} else if (encoding == 0x01) {
int len = n / 2;
const char16_t *framedata = (const char16_t *) (frameData + 1);
char16_t *framedatacopy = NULL;
if (*framedata == 0xfffe) {
if (len <= 1) {
return; // nothing after the marker
}
framedatacopy = new (std::nothrow) char16_t[len];
if (framedatacopy == NULL) {
return;
}
for (int i = 0; i < len; i++) {
framedatacopy[i] = bswap_16(framedata[i]);
}
framedata = framedatacopy;
framedata++;
len--;
} else if (*framedata == 0xfeff) {
if (len <= 1) {
return; // nothing after the marker
}
framedata++;
len--;
}
bool eightBit = true;
for (int i = 0; i < len; i++) {
if (framedata[i] > 0xff) {
eightBit = false;
break;
}
}
if (eightBit) {
char *frame8 = new (std::nothrow) char[len];
if (frame8 != NULL) {
for (int i = 0; i < len; i++) {
frame8[i] = framedata[i];
}
id->setTo(frame8, len);
delete [] frame8;
} else {
id->setTo(framedata, len);
}
} else {
id->setTo(framedata, len);
}
if (framedatacopy != NULL) {
delete[] framedatacopy;
}
}
}
| 0 |
harfbuzz | 81c8ef785b079980ad5b46be4fe7c7bf156dbf65 | NOT_APPLICABLE | NOT_APPLICABLE | static HB_Error Load_ChainPosClassSet(
HB_ChainContextPosFormat2* ccpf2,
HB_ChainPosClassSet* cpcs,
HB_Stream stream )
{
HB_Error error;
HB_UShort n, m, count;
HB_UInt cur_offset, new_offset, base_offset;
HB_ChainPosClassRule* cpcr;
base_offset = FILE_Pos();
if ( ACCESS_Frame( 2L ) )
return error;
count = cpcs->ChainPosClassRuleCount = GET_UShort();
FORGET_Frame();
cpcs->ChainPosClassRule = NULL;
if ( ALLOC_ARRAY( cpcs->ChainPosClassRule, count,
HB_ChainPosClassRule ) )
return error;
cpcr = cpcs->ChainPosClassRule;
for ( n = 0; n < count; n++ )
{
if ( ACCESS_Frame( 2L ) )
goto Fail;
new_offset = GET_UShort() + base_offset;
FORGET_Frame();
cur_offset = FILE_Pos();
if ( FILE_Seek( new_offset ) ||
( error = Load_ChainPosClassRule( ccpf2, &cpcr[n],
stream ) ) != HB_Err_Ok )
goto Fail;
(void)FILE_Seek( cur_offset );
}
return HB_Err_Ok;
Fail:
for ( m = 0; m < n; m++ )
Free_ChainPosClassRule( &cpcr[m] );
FREE( cpcr );
return error;
}
| 0 |
linux | f9432c5ec8b1e9a09b9b0e5569e3c73db8de432a | NOT_APPLICABLE | NOT_APPLICABLE | static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
{
struct tty_struct *tty = dev->port.tty;
struct sk_buff *skb;
int inserted = 0;
if (!tty)
return;
BT_DBG("dev %p tty %p", dev, tty);
rfcomm_dlc_lock(dev->dlc);
while ((skb = skb_dequeue(&dev->pending))) {
inserted += tty_insert_flip_string(tty, skb->data, skb->len);
kfree_skb(skb);
}
rfcomm_dlc_unlock(dev->dlc);
if (inserted > 0)
tty_flip_buffer_push(tty);
}
| 0 |
linux | 17839856fd588f4ab6b789f482ed3ffd7c403e1f | NOT_APPLICABLE | NOT_APPLICABLE | void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
bool make_dirty)
{
unsigned long index;
/*
* TODO: this can be optimized for huge pages: if a series of pages is
* physically contiguous and part of the same compound page, then a
* single operation to the head page should suffice.
*/
if (!make_dirty) {
unpin_user_pages(pages, npages);
return;
}
for (index = 0; index < npages; index++) {
struct page *page = compound_head(pages[index]);
/*
* Checking PageDirty at this point may race with
* clear_page_dirty_for_io(), but that's OK. Two key
* cases:
*
* 1) This code sees the page as already dirty, so it
* skips the call to set_page_dirty(). That could happen
* because clear_page_dirty_for_io() called
* page_mkclean(), followed by set_page_dirty().
* However, now the page is going to get written back,
* which meets the original intention of setting it
* dirty, so all is well: clear_page_dirty_for_io() goes
* on to call TestClearPageDirty(), and write the page
* back.
*
* 2) This code sees the page as clean, so it calls
* set_page_dirty(). The page stays dirty, despite being
* written back, so it gets written back again in the
* next writeback cycle. This is harmless.
*/
if (!PageDirty(page))
set_page_dirty_lock(page);
unpin_user_page(page);
}
} | 0 |
linux | 9b0971ca7fc75daca80c0bb6c02e96059daea90a | NOT_APPLICABLE | NOT_APPLICABLE | int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
{
struct file *source_kvm_file;
struct kvm *source_kvm;
struct kvm_sev_info source_sev, *mirror_sev;
int ret;
source_kvm_file = fget(source_fd);
if (!file_is_kvm(source_kvm_file)) {
ret = -EBADF;
goto e_source_put;
}
source_kvm = source_kvm_file->private_data;
mutex_lock(&source_kvm->lock);
if (!sev_guest(source_kvm)) {
ret = -EINVAL;
goto e_source_unlock;
}
/* Mirrors of mirrors should work, but let's not get silly */
if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
ret = -EINVAL;
goto e_source_unlock;
}
memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
sizeof(source_sev));
/*
* The mirror kvm holds an enc_context_owner ref so its asid can't
* disappear until we're done with it
*/
kvm_get_kvm(source_kvm);
fput(source_kvm_file);
mutex_unlock(&source_kvm->lock);
mutex_lock(&kvm->lock);
if (sev_guest(kvm)) {
ret = -EINVAL;
goto e_mirror_unlock;
}
/* Set enc_context_owner and copy its encryption context over */
mirror_sev = &to_kvm_svm(kvm)->sev_info;
mirror_sev->enc_context_owner = source_kvm;
mirror_sev->active = true;
mirror_sev->asid = source_sev.asid;
mirror_sev->fd = source_sev.fd;
mirror_sev->es_active = source_sev.es_active;
mirror_sev->handle = source_sev.handle;
/*
* Do not copy ap_jump_table. Since the mirror does not share the same
* KVM contexts as the original, and they may have different
* memory-views.
*/
mutex_unlock(&kvm->lock);
return 0;
e_mirror_unlock:
mutex_unlock(&kvm->lock);
kvm_put_kvm(source_kvm);
return ret;
e_source_unlock:
mutex_unlock(&source_kvm->lock);
e_source_put:
if (source_kvm_file)
fput(source_kvm_file);
return ret;
} | 0 |
Chrome | 7cf563aba8f4b3bab68e9bfe43824d952241dcf7 | CVE-2012-2890 | CWE-399 | void HostPortAllocatorSession::SendSessionRequest(const std::string& host,
int port) {
GURL url("https://" + host + ":" + base::IntToString(port) +
GetSessionRequestUrl() + "&sn=1");
scoped_ptr<UrlFetcher> url_fetcher(new UrlFetcher(url, UrlFetcher::GET));
url_fetcher->SetRequestContext(url_context_);
url_fetcher->SetHeader("X-Talk-Google-Relay-Auth", relay_token());
url_fetcher->SetHeader("X-Google-Relay-Auth", relay_token());
url_fetcher->SetHeader("X-Stream-Type", "chromoting");
url_fetcher->Start(base::Bind(&HostPortAllocatorSession::OnSessionRequestDone,
base::Unretained(this), url_fetcher.get()));
url_fetchers_.insert(url_fetcher.release());
}
| 1 |
Android | cc274e2abe8b2a6698a5c47d8aa4bb45f1f9538d | CVE-2016-2464 | CWE-20 | int Track::Info::CopyStr(char* Info::*str, Info& dst_) const {
if (str == static_cast<char * Info::*>(NULL))
return -1;
char*& dst = dst_.*str;
if (dst) // should be NULL already
return -1;
const char* const src = this->*str;
if (src == NULL)
return 0;
const size_t len = strlen(src);
dst = new (std::nothrow) char[len + 1];
if (dst == NULL)
return -1;
strcpy(dst, src);
return 0;
}
| 1 |
linux | 3a4d44b6162555070194e486ff6b3799a8d323a2 | NOT_APPLICABLE | NOT_APPLICABLE | COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
struct compat_timespec __user *, tp)
{
long err;
mm_segment_t oldfs;
struct timespec ts;
oldfs = get_fs();
set_fs(KERNEL_DS);
err = sys_clock_getres(which_clock,
(struct timespec __user *) &ts);
set_fs(oldfs);
if (!err && tp && compat_put_timespec(&ts, tp))
return -EFAULT;
return err;
} | 0 |
Chrome | da790f920bbc169a6805a4fb83b4c2ab09532d91 | NOT_APPLICABLE | NOT_APPLICABLE | DataReductionProxyConfig::DataReductionProxyConfig(
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner,
network::NetworkConnectionTracker* network_connection_tracker,
std::unique_ptr<DataReductionProxyConfigValues> config_values,
DataReductionProxyConfigurator* configurator)
: unreachable_(false),
enabled_by_user_(false),
config_values_(std::move(config_values)),
io_task_runner_(io_task_runner),
ui_task_runner_(ui_task_runner),
network_connection_tracker_(network_connection_tracker),
configurator_(configurator),
connection_type_(network::mojom::ConnectionType::CONNECTION_UNKNOWN),
network_properties_manager_(nullptr),
weak_factory_(this) {
DCHECK(io_task_runner_);
DCHECK(network_connection_tracker_);
DCHECK(configurator);
thread_checker_.DetachFromThread();
}
| 0 |
ImageMagick | 4e81ce8b07219c69a9aeccb0f7f7b927ca6db74c | NOT_APPLICABLE | NOT_APPLICABLE | MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
return(cache_info->virtual_pixel_method);
}
| 0 |
jq | 8eb1367ca44e772963e704a700ef72ae2e12babd | NOT_APPLICABLE | NOT_APPLICABLE | static void parser_free(struct jv_parser* p) {
parser_reset(p);
jv_free(p->path);
jv_free(p->output);
jv_mem_free(p->stack);
jv_mem_free(p->tokenbuf);
jvp_dtoa_context_free(&p->dtoa);
}
| 0 |
libcomps | e3a5d056633677959ad924a51758876d415e7046 | NOT_APPLICABLE | NOT_APPLICABLE | static void comps_objrtree_destroy(COMPS_ObjRTree * rt) {
comps_hslist_destroy(&(rt->subnodes));
}
| 0 |
gpac | ad18ece95fa064efc0995c4ab2c985f77fb166ec | NOT_APPLICABLE | NOT_APPLICABLE | GF_Err gf_isom_hint_sample_description_data(GF_ISOFile *the_file, u32 trackNumber, GF_ISOTrackID SourceTrackID, u32 StreamDescriptionIndex, u16 DataLength, u32 offsetInDescription, u8 AtBegin)
{
GF_TrackBox *trak;
GF_HintSampleEntryBox *entry;
u32 count;
u16 refIndex;
GF_HintPacket *pck;
GF_StreamDescDTE *dte;
GF_Err e;
GF_TrackReferenceTypeBox *hint;
trak = gf_isom_get_track_from_file(the_file, trackNumber);
if (!trak || !IsHintTrack(trak)) return GF_BAD_PARAM;
e = Media_GetSampleDesc(trak->Media, trak->Media->information->sampleTable->currentEntryIndex, (GF_SampleEntryBox **) &entry, &count);
if (e) return e;
if (!entry->hint_sample) return GF_BAD_PARAM;
count = gf_list_count(entry->hint_sample->packetTable);
if (!count) return GF_BAD_PARAM;
pck = (GF_HintPacket *)gf_list_get(entry->hint_sample->packetTable, count - 1);
dte = (GF_StreamDescDTE *) NewDTE(3);
dte->byteOffset = offsetInDescription;
dte->dataLength = DataLength;
dte->streamDescIndex = StreamDescriptionIndex;
if (SourceTrackID == trak->Header->trackID) {
dte->trackRefIndex = (s8) -1;
} else {
//get (or set) the track reference index
e = Track_FindRef(trak, GF_ISOM_REF_HINT, &hint);
if (e) return e;
e = reftype_AddRefTrack(hint, SourceTrackID, &refIndex);
if (e) return e;
//WARNING: IN QT, MUST BE 0-based !!!
dte->trackRefIndex = (u8) (refIndex - 1);
}
return gf_isom_hint_pck_add_dte(pck, (GF_GenericDTE *)dte, AtBegin);
} | 0 |
Bento4 | 53499d8d4c69142137c7c7f0097a444783fdeb90 | NOT_APPLICABLE | NOT_APPLICABLE | AP4_HvccAtom::Create(AP4_Size size, AP4_ByteStream& stream)
{
// read the raw bytes in a buffer
unsigned int payload_size = size-AP4_ATOM_HEADER_SIZE;
AP4_DataBuffer payload_data(payload_size);
AP4_Result result = stream.Read(payload_data.UseData(), payload_size);
if (AP4_FAILED(result)) return NULL;
return new AP4_HvccAtom(size, payload_data.GetData());
} | 0 |
linux | c08eadca1bdfa099e20a32f8fa4b52b2f672236d | NOT_APPLICABLE | NOT_APPLICABLE | static void em28xx_pre_card_setup(struct em28xx *dev)
{
/*
* Set the initial XCLK and I2C clock values based on the board
* definition
*/
em28xx_set_xclk_i2c_speed(dev);
/* request some modules */
switch (dev->model) {
case EM2861_BOARD_PLEXTOR_PX_TV100U:
/* Sets the msp34xx I2S speed */
dev->i2s_speed = 2048000;
break;
case EM2861_BOARD_KWORLD_PVRTV_300U:
case EM2880_BOARD_KWORLD_DVB_305U:
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0x6d);
usleep_range(10000, 11000);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0x7d);
usleep_range(10000, 11000);
break;
case EM2870_BOARD_COMPRO_VIDEOMATE:
/*
* TODO: someone can do some cleanup here...
* not everything's needed
*/
em28xx_write_reg(dev, EM2880_R04_GPO, 0x00);
usleep_range(10000, 11000);
em28xx_write_reg(dev, EM2880_R04_GPO, 0x01);
usleep_range(10000, 11000);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd);
msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfc);
msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xdc);
msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfc);
msleep(70);
break;
case EM2870_BOARD_TERRATEC_XS_MT2060:
/*
* this device needs some gpio writes to get the DVB-T
* demod work
*/
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xde);
msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
msleep(70);
break;
case EM2870_BOARD_PINNACLE_PCTV_DVB:
/*
* this device needs some gpio writes to get the
* DVB-T demod work
*/
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xde);
msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
msleep(70);
break;
case EM2820_BOARD_GADMEI_UTV310:
case EM2820_BOARD_MSI_VOX_USB_2:
/* enables audio for that devices */
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd);
break;
case EM2882_BOARD_KWORLD_ATSC_315U:
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
usleep_range(10000, 11000);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
usleep_range(10000, 11000);
em28xx_write_reg(dev, EM2880_R04_GPO, 0x00);
usleep_range(10000, 11000);
em28xx_write_reg(dev, EM2880_R04_GPO, 0x08);
usleep_range(10000, 11000);
break;
case EM2860_BOARD_KAIOMY_TVNPC_U2:
em28xx_write_regs(dev, EM28XX_R0F_XCLK, "\x07", 1);
em28xx_write_regs(dev, EM28XX_R06_I2C_CLK, "\x40", 1);
em28xx_write_regs(dev, 0x0d, "\x42", 1);
em28xx_write_regs(dev, 0x08, "\xfd", 1);
usleep_range(10000, 11000);
em28xx_write_regs(dev, 0x08, "\xff", 1);
usleep_range(10000, 11000);
em28xx_write_regs(dev, 0x08, "\x7f", 1);
usleep_range(10000, 11000);
em28xx_write_regs(dev, 0x08, "\x6b", 1);
break;
case EM2860_BOARD_EASYCAP:
em28xx_write_regs(dev, 0x08, "\xf8", 1);
break;
case EM2820_BOARD_IODATA_GVMVP_SZ:
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xff);
msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xf7);
usleep_range(10000, 11000);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfe);
msleep(70);
em28xx_write_reg(dev, EM2820_R08_GPIO_CTRL, 0xfd);
msleep(70);
break;
case EM2860_BOARD_TERRATEC_GRABBY:
/*
* HACK?: Ensure AC97 register reading is reliable before
* proceeding. In practice, this will wait about 1.6 seconds.
*/
em28xx_wait_until_ac97_features_equals(dev, 0x6a90);
break;
}
em28xx_gpio_set(dev, dev->board.tuner_gpio);
em28xx_set_mode(dev, EM28XX_ANALOG_MODE);
/* Unlock device */
em28xx_set_mode(dev, EM28XX_SUSPEND);
} | 0 |
linux | 2f95fa5c955d0a9987ffdc3a095e2f4e62c5f2a9 | NOT_APPLICABLE | NOT_APPLICABLE | static struct bfq_queue *
bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
{
struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue;
unsigned int limit = in_serv_bfqq->inject_limit;
/*
* If
* - bfqq is not weight-raised and therefore does not carry
* time-critical I/O,
* or
* - regardless of whether bfqq is weight-raised, bfqq has
* however a long think time, during which it can absorb the
* effect of an appropriate number of extra I/O requests
* from other queues (see bfq_update_inject_limit for
* details on the computation of this number);
* then injection can be performed without restrictions.
*/
bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 ||
!bfq_bfqq_has_short_ttime(in_serv_bfqq);
/*
* If
* - the baseline total service time could not be sampled yet,
* so the inject limit happens to be still 0, and
* - a lot of time has elapsed since the plugging of I/O
* dispatching started, so drive speed is being wasted
* significantly;
* then temporarily raise inject limit to one request.
*/
if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 &&
bfq_bfqq_wait_request(in_serv_bfqq) &&
time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies +
bfqd->bfq_slice_idle)
)
limit = 1;
if (bfqd->rq_in_driver >= limit)
return NULL;
/*
* Linear search of the source queue for injection; but, with
* a high probability, very few steps are needed to find a
* candidate queue, i.e., a queue with enough budget left for
* its next request. In fact:
* - BFQ dynamically updates the budget of every queue so as
* to accommodate the expected backlog of the queue;
* - if a queue gets all its requests dispatched as injected
* service, then the queue is removed from the active list
* (and re-added only if it gets new requests, but then it
* is assigned again enough budget for its new backlog).
*/
list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
(in_serv_always_inject || bfqq->wr_coeff > 1) &&
bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
bfq_bfqq_budget_left(bfqq)) {
/*
* Allow for only one large in-flight request
* on non-rotational devices, for the
* following reason. On non-rotationl drives,
* large requests take much longer than
* smaller requests to be served. In addition,
* the drive prefers to serve large requests
* w.r.t. to small ones, if it can choose. So,
* having more than one large requests queued
* in the drive may easily make the next first
* request of the in-service queue wait for so
* long to break bfqq's service guarantees. On
* the bright side, large requests let the
* drive reach a very high throughput, even if
* there is only one in-flight large request
* at a time.
*/
if (blk_queue_nonrot(bfqd->queue) &&
blk_rq_sectors(bfqq->next_rq) >=
BFQQ_SECT_THR_NONROT)
limit = min_t(unsigned int, 1, limit);
else
limit = in_serv_bfqq->inject_limit;
if (bfqd->rq_in_driver < limit) {
bfqd->rqs_injected = true;
return bfqq;
}
}
return NULL; | 0 |
ImageMagick | ecf7c6b288e11e7e7f75387c5e9e93e423b98397 | NOT_APPLICABLE | NOT_APPLICABLE | MagickExport MagickBooleanType DrawAffineImage(Image *image,
const Image *source,const AffineMatrix *affine,ExceptionInfo *exception)
{
AffineMatrix
inverse_affine;
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
PixelInfo
zero;
PointInfo
extent[4],
min,
max;
register ssize_t
i;
SegmentInfo
edge;
ssize_t
start,
stop,
y;
/*
Determine bounding box.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(source != (const Image *) NULL);
assert(source->signature == MagickCoreSignature);
assert(affine != (AffineMatrix *) NULL);
extent[0].x=0.0;
extent[0].y=0.0;
extent[1].x=(double) source->columns-1.0;
extent[1].y=0.0;
extent[2].x=(double) source->columns-1.0;
extent[2].y=(double) source->rows-1.0;
extent[3].x=0.0;
extent[3].y=(double) source->rows-1.0;
for (i=0; i < 4; i++)
{
PointInfo
point;
point=extent[i];
extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
}
min=extent[0];
max=extent[0];
for (i=1; i < 4; i++)
{
if (min.x > extent[i].x)
min.x=extent[i].x;
if (min.y > extent[i].y)
min.y=extent[i].y;
if (max.x < extent[i].x)
max.x=extent[i].x;
if (max.y < extent[i].y)
max.y=extent[i].y;
}
/*
Affine transform image.
*/
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
edge.x1=MagickMax(min.x,0.0);
edge.y1=MagickMax(min.y,0.0);
edge.x2=MagickMin(max.x,(double) image->columns-1.0);
edge.y2=MagickMin(max.y,(double) image->rows-1.0);
inverse_affine=InverseAffineMatrix(affine);
GetPixelInfo(image,&zero);
start=(ssize_t) ceil(edge.y1-0.5);
stop=(ssize_t) floor(edge.y2+0.5);
source_view=AcquireVirtualCacheView(source,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(source,image,stop-start,1)
#endif
for (y=start; y <= stop; y++)
{
PixelInfo
composite,
pixel;
PointInfo
point;
register ssize_t
x;
register Quantum
*magick_restrict q;
SegmentInfo
inverse_edge;
ssize_t
x_offset;
inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
if (inverse_edge.x2 < inverse_edge.x1)
continue;
q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
1,exception);
if (q == (Quantum *) NULL)
continue;
pixel=zero;
composite=zero;
x_offset=0;
for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++)
{
point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
inverse_affine.tx;
point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
inverse_affine.ty;
status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel,
point.x,point.y,&pixel,exception);
if (status == MagickFalse)
break;
GetPixelInfoPixel(image,q,&composite);
CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha,
&composite);
SetPixelViaPixelInfo(image,&composite,q);
x_offset++;
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
| 0 |
weechat | 40ccacb4330a64802b1f1e28ed9a6b6d3ca9197f | NOT_APPLICABLE | NOT_APPLICABLE | irc_server_apply_command_line_options (struct t_irc_server *server,
int argc, char **argv)
{
int i, index_option;
char *pos, *option_name, *ptr_value, *value_boolean[2] = { "off", "on" };
for (i = 0; i < argc; i++)
{
if (argv[i][0] == '-')
{
pos = strchr (argv[i], '=');
if (pos)
{
option_name = weechat_strndup (argv[i] + 1, pos - argv[i] - 1);
ptr_value = pos + 1;
}
else
{
option_name = strdup (argv[i] + 1);
ptr_value = value_boolean[1];
}
if (option_name)
{
if (weechat_strcasecmp (option_name, "temp") == 0)
{
/* temporary server, not saved */
server->temp_server = 1;
}
else
{
index_option = irc_server_search_option (option_name);
if (index_option < 0)
{
/* look if option is negative, like "-noxxx" */
if (weechat_strncasecmp (argv[i], "-no", 3) == 0)
{
free (option_name);
option_name = strdup (argv[i] + 3);
index_option = irc_server_search_option (option_name);
ptr_value = value_boolean[0];
}
}
if (index_option >= 0)
{
weechat_config_option_set (server->options[index_option],
ptr_value, 1);
}
}
free (option_name);
}
}
}
} | 0 |
linux | 983d8e60f50806f90534cc5373d0ce867e5aaf79 | NOT_APPLICABLE | NOT_APPLICABLE | xfs_file_ioctl(
struct file *filp,
unsigned int cmd,
unsigned long p)
{
struct inode *inode = file_inode(filp);
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
void __user *arg = (void __user *)p;
int error;
trace_xfs_file_ioctl(ip);
switch (cmd) {
case FITRIM:
return xfs_ioc_trim(mp, arg);
case FS_IOC_GETFSLABEL:
return xfs_ioc_getlabel(mp, arg);
case FS_IOC_SETFSLABEL:
return xfs_ioc_setlabel(filp, mp, arg);
case XFS_IOC_ALLOCSP:
case XFS_IOC_FREESP:
case XFS_IOC_ALLOCSP64:
case XFS_IOC_FREESP64: {
xfs_flock64_t bf;
if (copy_from_user(&bf, arg, sizeof(bf)))
return -EFAULT;
return xfs_ioc_space(filp, &bf);
}
case XFS_IOC_DIOINFO: {
struct xfs_buftarg *target = xfs_inode_buftarg(ip);
struct dioattr da;
da.d_mem = da.d_miniosz = target->bt_logical_sectorsize;
da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
if (copy_to_user(arg, &da, sizeof(da)))
return -EFAULT;
return 0;
}
case XFS_IOC_FSBULKSTAT_SINGLE:
case XFS_IOC_FSBULKSTAT:
case XFS_IOC_FSINUMBERS:
return xfs_ioc_fsbulkstat(filp, cmd, arg);
case XFS_IOC_BULKSTAT:
return xfs_ioc_bulkstat(filp, cmd, arg);
case XFS_IOC_INUMBERS:
return xfs_ioc_inumbers(mp, cmd, arg);
case XFS_IOC_FSGEOMETRY_V1:
return xfs_ioc_fsgeometry(mp, arg, 3);
case XFS_IOC_FSGEOMETRY_V4:
return xfs_ioc_fsgeometry(mp, arg, 4);
case XFS_IOC_FSGEOMETRY:
return xfs_ioc_fsgeometry(mp, arg, 5);
case XFS_IOC_AG_GEOMETRY:
return xfs_ioc_ag_geometry(mp, arg);
case XFS_IOC_GETVERSION:
return put_user(inode->i_generation, (int __user *)arg);
case XFS_IOC_FSGETXATTRA:
return xfs_ioc_fsgetxattra(ip, arg);
case XFS_IOC_GETBMAP:
case XFS_IOC_GETBMAPA:
case XFS_IOC_GETBMAPX:
return xfs_ioc_getbmap(filp, cmd, arg);
case FS_IOC_GETFSMAP:
return xfs_ioc_getfsmap(ip, arg);
case XFS_IOC_SCRUB_METADATA:
return xfs_ioc_scrub_metadata(filp, arg);
case XFS_IOC_FD_TO_HANDLE:
case XFS_IOC_PATH_TO_HANDLE:
case XFS_IOC_PATH_TO_FSHANDLE: {
xfs_fsop_handlereq_t hreq;
if (copy_from_user(&hreq, arg, sizeof(hreq)))
return -EFAULT;
return xfs_find_handle(cmd, &hreq);
}
case XFS_IOC_OPEN_BY_HANDLE: {
xfs_fsop_handlereq_t hreq;
if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
return -EFAULT;
return xfs_open_by_handle(filp, &hreq);
}
case XFS_IOC_READLINK_BY_HANDLE: {
xfs_fsop_handlereq_t hreq;
if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
return -EFAULT;
return xfs_readlink_by_handle(filp, &hreq);
}
case XFS_IOC_ATTRLIST_BY_HANDLE:
return xfs_attrlist_by_handle(filp, arg);
case XFS_IOC_ATTRMULTI_BY_HANDLE:
return xfs_attrmulti_by_handle(filp, arg);
case XFS_IOC_SWAPEXT: {
struct xfs_swapext sxp;
if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t)))
return -EFAULT;
error = mnt_want_write_file(filp);
if (error)
return error;
error = xfs_ioc_swapext(&sxp);
mnt_drop_write_file(filp);
return error;
}
case XFS_IOC_FSCOUNTS: {
xfs_fsop_counts_t out;
xfs_fs_counts(mp, &out);
if (copy_to_user(arg, &out, sizeof(out)))
return -EFAULT;
return 0;
}
case XFS_IOC_SET_RESBLKS: {
xfs_fsop_resblks_t inout;
uint64_t in;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (xfs_is_readonly(mp))
return -EROFS;
if (copy_from_user(&inout, arg, sizeof(inout)))
return -EFAULT;
error = mnt_want_write_file(filp);
if (error)
return error;
/* input parameter is passed in resblks field of structure */
in = inout.resblks;
error = xfs_reserve_blocks(mp, &in, &inout);
mnt_drop_write_file(filp);
if (error)
return error;
if (copy_to_user(arg, &inout, sizeof(inout)))
return -EFAULT;
return 0;
}
case XFS_IOC_GET_RESBLKS: {
xfs_fsop_resblks_t out;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
error = xfs_reserve_blocks(mp, NULL, &out);
if (error)
return error;
if (copy_to_user(arg, &out, sizeof(out)))
return -EFAULT;
return 0;
}
case XFS_IOC_FSGROWFSDATA: {
struct xfs_growfs_data in;
if (copy_from_user(&in, arg, sizeof(in)))
return -EFAULT;
error = mnt_want_write_file(filp);
if (error)
return error;
error = xfs_growfs_data(mp, &in);
mnt_drop_write_file(filp);
return error;
}
case XFS_IOC_FSGROWFSLOG: {
struct xfs_growfs_log in;
if (copy_from_user(&in, arg, sizeof(in)))
return -EFAULT;
error = mnt_want_write_file(filp);
if (error)
return error;
error = xfs_growfs_log(mp, &in);
mnt_drop_write_file(filp);
return error;
}
case XFS_IOC_FSGROWFSRT: {
xfs_growfs_rt_t in;
if (copy_from_user(&in, arg, sizeof(in)))
return -EFAULT;
error = mnt_want_write_file(filp);
if (error)
return error;
error = xfs_growfs_rt(mp, &in);
mnt_drop_write_file(filp);
return error;
}
case XFS_IOC_GOINGDOWN: {
uint32_t in;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (get_user(in, (uint32_t __user *)arg))
return -EFAULT;
return xfs_fs_goingdown(mp, in);
}
case XFS_IOC_ERROR_INJECTION: {
xfs_error_injection_t in;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (copy_from_user(&in, arg, sizeof(in)))
return -EFAULT;
return xfs_errortag_add(mp, in.errtag);
}
case XFS_IOC_ERROR_CLEARALL:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return xfs_errortag_clearall(mp);
case XFS_IOC_FREE_EOFBLOCKS: {
struct xfs_fs_eofblocks eofb;
struct xfs_icwalk icw;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (xfs_is_readonly(mp))
return -EROFS;
if (copy_from_user(&eofb, arg, sizeof(eofb)))
return -EFAULT;
error = xfs_fs_eofblocks_from_user(&eofb, &icw);
if (error)
return error;
trace_xfs_ioc_free_eofblocks(mp, &icw, _RET_IP_);
sb_start_write(mp->m_super);
error = xfs_blockgc_free_space(mp, &icw);
sb_end_write(mp->m_super);
return error;
}
default:
return -ENOTTY;
}
} | 0 |
libexpat | c4bf96bb51dd2a1b0e185374362ee136fe2c9d7f | NOT_APPLICABLE | NOT_APPLICABLE | XML_SetDoctypeDeclHandler(XML_Parser parser,
XML_StartDoctypeDeclHandler start,
XML_EndDoctypeDeclHandler end)
{
if (parser == NULL)
return;
startDoctypeDeclHandler = start;
endDoctypeDeclHandler = end;
} | 0 |
libgxps | 672c65ea8cbd2bcfd82a6b6498a4f1eb9daf5ec5 | NOT_APPLICABLE | NOT_APPLICABLE | gxps_images_guess_content_type (GXPSArchive *zip,
const gchar *image_uri)
{
GInputStream *stream;
guchar buffer[1024];
gssize bytes_read;
gchar *mime_type;
stream = gxps_archive_open (zip, image_uri);
if (!stream)
return NULL;
bytes_read = g_input_stream_read (stream, buffer, 1024, NULL, NULL);
mime_type = g_content_type_guess (NULL, buffer, bytes_read, NULL);
g_object_unref (stream);
return mime_type;
} | 0 |
linux | 15291164b22a357cb211b618adfef4fa82fc0de3 | NOT_APPLICABLE | NOT_APPLICABLE | int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal = transaction->t_journal;
struct journal_head *jh;
int drop_reserve = 0;
int err = 0;
int was_modified = 0;
BUFFER_TRACE(bh, "entry");
jbd_lock_bh_state(bh);
spin_lock(&journal->j_list_lock);
if (!buffer_jbd(bh))
goto not_jbd;
jh = bh2jh(bh);
/* Critical error: attempting to delete a bitmap buffer, maybe?
* Don't do any jbd operations, and return an error. */
if (!J_EXPECT_JH(jh, !jh->b_committed_data,
"inconsistent data on disk")) {
err = -EIO;
goto not_jbd;
}
/* keep track of wether or not this transaction modified us */
was_modified = jh->b_modified;
/*
* The buffer's going from the transaction, we must drop
* all references -bzzz
*/
jh->b_modified = 0;
if (jh->b_transaction == handle->h_transaction) {
J_ASSERT_JH(jh, !jh->b_frozen_data);
/* If we are forgetting a buffer which is already part
* of this transaction, then we can just drop it from
* the transaction immediately. */
clear_buffer_dirty(bh);
clear_buffer_jbddirty(bh);
JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
/*
* we only want to drop a reference if this transaction
* modified the buffer
*/
if (was_modified)
drop_reserve = 1;
/*
* We are no longer going to journal this buffer.
* However, the commit of this transaction is still
* important to the buffer: the delete that we are now
* processing might obsolete an old log entry, so by
* committing, we can satisfy the buffer's checkpoint.
*
* So, if we have a checkpoint on the buffer, we should
* now refile the buffer on our BJ_Forget list so that
* we know to remove the checkpoint after we commit.
*/
if (jh->b_cp_transaction) {
__jbd2_journal_temp_unlink_buffer(jh);
__jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
} else {
__jbd2_journal_unfile_buffer(jh);
if (!buffer_jbd(bh)) {
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
__bforget(bh);
goto drop;
}
}
} else if (jh->b_transaction) {
J_ASSERT_JH(jh, (jh->b_transaction ==
journal->j_committing_transaction));
/* However, if the buffer is still owned by a prior
* (committing) transaction, we can't drop it yet... */
JBUFFER_TRACE(jh, "belongs to older transaction");
/* ... but we CAN drop it from the new transaction if we
* have also modified it since the original commit. */
if (jh->b_next_transaction) {
J_ASSERT(jh->b_next_transaction == transaction);
jh->b_next_transaction = NULL;
/*
* only drop a reference if this transaction modified
* the buffer
*/
if (was_modified)
drop_reserve = 1;
}
}
not_jbd:
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh);
__brelse(bh);
drop:
if (drop_reserve) {
/* no need to reserve log space for this block -bzzz */
handle->h_buffer_credits++;
}
return err;
}
| 0 |
bash | 863d31ae775d56b785dc5b0105b6d251515d81d5 | NOT_APPLICABLE | NOT_APPLICABLE | init_funcname_var ()
{
SHELL_VAR *v;
v = find_variable ("FUNCNAME");
if (v)
return v;
#if defined (ARRAY_VARS)
INIT_DYNAMIC_ARRAY_VAR ("FUNCNAME", get_funcname, null_array_assign);
#else
INIT_DYNAMIC_VAR ("FUNCNAME", (char *)NULL, get_funcname, null_assign);
#endif
VSETATTR (v, att_invisible|att_noassign);
return v;
} | 0 |
ghostscript | f597300439e62f5e921f0d7b1e880b5c1a1f1607 | NOT_APPLICABLE | NOT_APPLICABLE | add_mrange(fz_context *ctx, pdf_cmap *cmap, unsigned int low, int *out, int len)
{
int out_pos;
if (cmap->dlen + len + 1 > cmap->dcap)
{
int new_cap = cmap->dcap ? cmap->dcap * 2 : 256;
cmap->dict = fz_resize_array(ctx, cmap->dict, new_cap, sizeof *cmap->dict);
cmap->dcap = new_cap;
}
out_pos = cmap->dlen;
cmap->dict[out_pos] = len;
memcpy(&cmap->dict[out_pos+1], out, sizeof(int)*len);
cmap->dlen += len + 1;
add_range(ctx, cmap, low, low, out_pos, 1, 1);
}
| 0 |
linux | f43f39958beb206b53292801e216d9b8a660f087 | NOT_APPLICABLE | NOT_APPLICABLE | static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct crypto_alg *alg;
struct crypto_user_alg *p = nlmsg_data(nlh);
int err;
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
return -EINVAL;
alg = crypto_alg_match(p, 1);
if (!alg)
return -ENOENT;
/* We can not unregister core algorithms such as aes-generic.
* We would loose the reference in the crypto_alg_list to this algorithm
* if we try to unregister. Unregistering such an algorithm without
* removing the module is not possible, so we restrict to crypto
* instances that are build from templates. */
err = -EINVAL;
if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
goto drop_alg;
err = -EBUSY;
if (refcount_read(&alg->cra_refcnt) > 2)
goto drop_alg;
err = crypto_unregister_instance((struct crypto_instance *)alg);
drop_alg:
crypto_mod_put(alg);
return err;
}
| 0 |
hhvm | 65c95a01541dd2fbc9c978ac53bed235b5376686 | NOT_APPLICABLE | NOT_APPLICABLE | static Variant HHVM_METHOD(ZipArchive, getFromIndex, int64_t index,
int64_t length, int64_t flags) {
auto zipDir = getResource<ZipDirectory>(this_, "zipDir");
FAIL_IF_INVALID_ZIPARCHIVE(getFromIndex, zipDir);
FAIL_IF_INVALID_INDEX(index);
if (length < 0) {
return empty_string_variant();
}
struct zip_stat zipStat;
if (zip_stat_index(zipDir->getZip(), index, 0, &zipStat) != 0) {
return false;
}
if (zipStat.size < 1) {
return empty_string_variant();
}
auto zipFile = zip_fopen_index(zipDir->getZip(), index, flags);
FAIL_IF_INVALID_PTR(zipFile);
if (length == 0) {
length = zipStat.size;
}
StringBuffer sb(length);
auto buf = sb.appendCursor(length);
auto n = zip_fread(zipFile, buf, length);
if (n > 0) {
sb.resize(n);
return sb.detach();
}
return empty_string_variant();
} | 0 |
libheif | 2710c930918609caaf0a664e9c7bc3dce05d5b58 | NOT_APPLICABLE | NOT_APPLICABLE | Error Box_iprp::parse(BitstreamRange& range)
{
//parse_full_box_header(range);
return read_children(range);
} | 0 |
linux | dee1f973ca341c266229faa5a1a5bb268bed3531 | CVE-2012-4508 | CWE-362 | static int ext4_split_unwritten_extents(handle_t *handle,
struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path *path,
int flags)
{
ext4_lblk_t eof_block;
ext4_lblk_t ee_block;
struct ext4_extent *ex;
unsigned int ee_len;
int split_flag = 0, depth;
ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino,
(unsigned long long)map->m_lblk, map->m_len);
eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
if (eof_block < map->m_lblk + map->m_len)
eof_block = map->m_lblk + map->m_len;
/*
* It is safe to convert extent to initialized via explicit
* zeroout only if extent is fully insde i_size or new_size.
*/
depth = ext_depth(inode);
ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
split_flag |= EXT4_EXT_MARK_UNINIT2;
flags |= EXT4_GET_BLOCKS_PRE_IO;
return ext4_split_extent(handle, inode, path, map, split_flag, flags);
}
| 1 |
libxml2 | e724879d964d774df9b7969fc846605aa1bac54c | NOT_APPLICABLE | NOT_APPLICABLE | htmlFindEncoding(xmlParserCtxtPtr ctxt) {
const xmlChar *start, *cur, *end;
if ((ctxt == NULL) || (ctxt->input == NULL) ||
(ctxt->input->encoding != NULL) || (ctxt->input->buf == NULL) ||
(ctxt->input->buf->encoder != NULL))
return(NULL);
if ((ctxt->input->cur == NULL) || (ctxt->input->end == NULL))
return(NULL);
start = ctxt->input->cur;
end = ctxt->input->end;
/* we also expect the input buffer to be zero terminated */
if (*end != 0)
return(NULL);
cur = xmlStrcasestr(start, BAD_CAST "HTTP-EQUIV");
if (cur == NULL)
return(NULL);
cur = xmlStrcasestr(cur, BAD_CAST "CONTENT");
if (cur == NULL)
return(NULL);
cur = xmlStrcasestr(cur, BAD_CAST "CHARSET=");
if (cur == NULL)
return(NULL);
cur += 8;
start = cur;
while (((*cur >= 'A') && (*cur <= 'Z')) ||
((*cur >= 'a') && (*cur <= 'z')) ||
((*cur >= '0') && (*cur <= '9')) ||
(*cur == '-') || (*cur == '_') || (*cur == ':') || (*cur == '/'))
cur++;
if (cur == start)
return(NULL);
return(xmlStrndup(start, cur - start));
} | 0 |
linux | 9a564bccb78a76740ea9d75a259942df8143d02c | NOT_APPLICABLE | NOT_APPLICABLE | static int set_ipsecrequest(struct sk_buff *skb,
uint8_t proto, uint8_t mode, int level,
uint32_t reqid, uint8_t family,
const xfrm_address_t *src, const xfrm_address_t *dst)
{
struct sadb_x_ipsecrequest *rq;
u8 *sa;
int socklen = pfkey_sockaddr_len(family);
int size_req;
size_req = sizeof(struct sadb_x_ipsecrequest) +
pfkey_sockaddr_pair_size(family);
rq = skb_put_zero(skb, size_req);
rq->sadb_x_ipsecrequest_len = size_req;
rq->sadb_x_ipsecrequest_proto = proto;
rq->sadb_x_ipsecrequest_mode = mode;
rq->sadb_x_ipsecrequest_level = level;
rq->sadb_x_ipsecrequest_reqid = reqid;
sa = (u8 *) (rq + 1);
if (!pfkey_sockaddr_fill(src, 0, (struct sockaddr *)sa, family) ||
!pfkey_sockaddr_fill(dst, 0, (struct sockaddr *)(sa + socklen), family))
return -EINVAL;
return 0;
} | 0 |
mupdf | 22c47acbd52949421f8c7cb46ea1556827d0fcbf | NOT_APPLICABLE | NOT_APPLICABLE | static void drawpage(fz_context *ctx, fz_document *doc, int pagenum)
{
fz_page *page;
fz_display_list *list = NULL;
fz_device *list_dev = NULL;
int start;
fz_cookie cookie = { 0 };
#if GREY_FALLBACK != 0
fz_device *test_dev = NULL;
int is_color = 0;
#else
int is_color = 2;
#endif
render_details render;
int status;
fz_var(list);
fz_var(list_dev);
fz_var(test_dev);
do
{
start = (showtime ? gettime() : 0);
page = fz_load_page(ctx, doc, pagenum - 1);
/* Calculate Page bounds, transform etc */
get_page_render_details(ctx, page, &render);
/* Make the display list, and see if we need color */
fz_try(ctx)
{
list = fz_new_display_list(ctx, render.bounds);
list_dev = fz_new_list_device(ctx, list);
#if GREY_FALLBACK != 0
test_dev = fz_new_test_device(ctx, &is_color, 0.01f, 0, list_dev);
fz_run_page(ctx, page, test_dev, fz_identity, &cookie);
fz_close_device(ctx, test_dev);
#else
fz_run_page(ctx, page, list_dev, fz_identity, &cookie);
#endif
fz_close_device(ctx, list_dev);
}
fz_always(ctx)
{
#if GREY_FALLBACK != 0
fz_drop_device(ctx, test_dev);
#endif
fz_drop_device(ctx, list_dev);
}
fz_catch(ctx)
{
fz_drop_display_list(ctx, list);
list = NULL;
/* Just continue with no list. Also, we can't do multiple
* threads if we have no list. */
render.num_workers = 1;
}
render.list = list;
#if GREY_FALLBACK != 0
if (list == NULL)
{
/* We need to know about color, but the previous test failed
* (presumably) due to the size of the list. Rerun direct
* from file. */
fz_try(ctx)
{
test_dev = fz_new_test_device(ctx, &is_color, 0.01f, 0, NULL);
fz_run_page(ctx, page, test_dev, fz_identity, &cookie);
fz_close_device(ctx, test_dev);
}
fz_always(ctx)
{
fz_drop_device(ctx, test_dev);
}
fz_catch(ctx)
{
/* We failed. Just give up. */
fz_drop_page(ctx, page);
fz_rethrow(ctx);
}
}
#endif
#if GREY_FALLBACK == 2
/* If we 'possibly' need color, find out if we 'really' need color. */
if (is_color == 1)
{
/* We know that the device has images or shadings in
* colored spaces. We have been told to test exhaustively
* so we know whether to use color or grey rendering. */
is_color = 0;
fz_try(ctx)
{
test_dev = fz_new_test_device(ctx, &is_color, 0.01f, FZ_TEST_OPT_IMAGES | FZ_TEST_OPT_SHADINGS, NULL);
if (list)
fz_run_display_list(ctx, list, test_dev, &fz_identity, &fz_infinite_rect, &cookie);
else
fz_run_page(ctx, page, test_dev, &fz_identity, &cookie);
fz_close_device(ctx, test_dev);
}
fz_always(ctx)
{
fz_drop_device(ctx, test_dev);
}
fz_catch(ctx)
{
fz_drop_display_list(ctx, list);
fz_drop_page(ctx, page);
fz_rethrow(ctx);
}
}
#endif
/* Figure out banding */
initialise_banding(ctx, &render, is_color);
if (bgprint.active && showtime)
{
int end = gettime();
start = end - start;
}
/* If we're not using bgprint, then no need to wait */
if (!bgprint.active)
break;
/* If we are using it, then wait for it to finish. */
status = wait_for_bgprint_to_finish();
if (status == RENDER_OK)
{
/* The background bgprint completed successfully. Drop out of the loop,
* and carry on with our next page. */
break;
}
/* The bgprint in the background failed! This might have been because
* we were using memory etc in the foreground. We'd better ditch
* everything we can and try again. */
fz_drop_display_list(ctx, list);
fz_drop_page(ctx, page);
if (status == RENDER_FATAL)
{
/* We failed because of not being able to output. No point in retrying. */
fz_throw(ctx, FZ_ERROR_GENERIC, "Failed to render page");
}
bgprint.started = 1;
bgprint.solo = 1;
mu_trigger_semaphore(&bgprint.start);
status = wait_for_bgprint_to_finish();
if (status != 0)
{
/* Hard failure */
fz_throw(ctx, FZ_ERROR_GENERIC, "Failed to render page");
}
/* Loop back to reload this page */
}
while (1);
if (showtime)
{
fprintf(stderr, "page %s %d", filename, pagenum);
}
if (bgprint.active)
{
bgprint.started = 1;
bgprint.solo = 0;
bgprint.render = render;
bgprint.filename = filename;
bgprint.pagenum = pagenum;
bgprint.interptime = start;
mu_trigger_semaphore(&bgprint.start);
}
else
{
if (try_render_page(ctx, pagenum, &cookie, start, 0, filename, 0, 0, &render))
{
/* Hard failure */
fz_throw(ctx, FZ_ERROR_GENERIC, "Failed to render page");
}
}
} | 0 |
linux | 36ae3c0a36b7456432fedce38ae2f7bd3e01a563 | NOT_APPLICABLE | NOT_APPLICABLE | void __attribute__((weak)) kvm_arch_irq_bypass_stop(
struct irq_bypass_consumer *cons)
{
}
| 0 |
poppler | e853106b58d6b4b0467dbd6436c9bb1cfbd372cf | NOT_APPLICABLE | NOT_APPLICABLE | void Gfx::doGouraudTriangleShFill(GfxGouraudTriangleShading *shading) {
double x0, y0, x1, y1, x2, y2;
GfxColor color0, color1, color2;
int i;
for (i = 0; i < shading->getNTriangles(); ++i) {
shading->getTriangle(i, &x0, &y0, &color0,
&x1, &y1, &color1,
&x2, &y2, &color2);
gouraudFillTriangle(x0, y0, &color0, x1, y1, &color1, x2, y2, &color2,
shading->getColorSpace()->getNComps(), 0);
}
}
| 0 |
linux | eb178619f930fa2ba2348de332a1ff1c66a31424 | NOT_APPLICABLE | NOT_APPLICABLE | mem_to_page(
void *addr)
{
if ((!is_vmalloc_addr(addr))) {
return virt_to_page(addr);
} else {
return vmalloc_to_page(addr);
}
}
| 0 |
hhvm | e264f04ae825a5d97758130cf8eec99862517e7e | NOT_APPLICABLE | NOT_APPLICABLE | TypedValue HHVM_FUNCTION(array_uintersect,
const Variant& array1,
const Variant& array2,
const Variant& data_compare_func,
const Array& args /* = null array */) {
Variant func = data_compare_func;
Array extra = args;
if (!extra.empty()) {
extra.prepend(func);
func = extra.pop();
}
diff_intersect_body(intersect, extra, false COMMA true COMMA NULL COMMA NULL
COMMA cmp_func COMMA &func);
} | 0 |
Android | 4974dcbd0289a2530df2ee2a25b5f92775df80da | NOT_APPLICABLE | NOT_APPLICABLE | static vpx_codec_err_t ctrl_set_postproc(vpx_codec_alg_priv_t *ctx,
va_list args) {
#if CONFIG_VP9_POSTPROC
vp8_postproc_cfg_t *data = va_arg(args, vp8_postproc_cfg_t *);
if (data) {
ctx->postproc_cfg_set = 1;
ctx->postproc_cfg = *((vp8_postproc_cfg_t *)data);
return VPX_CODEC_OK;
} else {
return VPX_CODEC_INVALID_PARAM;
}
#else
(void)ctx;
(void)args;
return VPX_CODEC_INCAPABLE;
#endif
}
| 0 |
linux | e66eded8309ebf679d3d3c1f5820d1f2ca332c71 | NOT_APPLICABLE | NOT_APPLICABLE | static inline void free_signal_struct(struct signal_struct *sig)
{
taskstats_tgid_free(sig);
sched_autogroup_exit(sig);
kmem_cache_free(signal_cachep, sig);
}
| 0 |
Chrome | 971548cdca2d4c0a6fedd3db0c94372c2a27eac3 | NOT_APPLICABLE | NOT_APPLICABLE | FrameTreeNode* RenderFrameHostImpl::AddChild(
std::unique_ptr<FrameTreeNode> child,
int process_id,
int frame_routing_id) {
CHECK_EQ(process_id, GetProcess()->GetID());
child->render_manager()->Init(GetSiteInstance(),
render_view_host()->GetRoutingID(),
frame_routing_id, MSG_ROUTING_NONE, false);
frame_tree_node_->render_manager()->CreateProxiesForChildFrame(child.get());
children_.push_back(std::move(child));
return children_.back().get();
}
| 0 |
openssl | 55869f594f052561b11a2db6a7c42690051868de | NOT_APPLICABLE | NOT_APPLICABLE | int FuzzerInitialize(int *argc, char ***argv)
{
OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CRYPTO_STRINGS, NULL);
ERR_clear_error();
CRYPTO_free_ex_index(0, -1);
FuzzerSetRand();
return 1;
} | 0 |
libxml2 | 92b9e8c8b3787068565a1820ba575d042f9eec66 | NOT_APPLICABLE | NOT_APPLICABLE | xmlDumpEnumeration(xmlBufferPtr buf, xmlEnumerationPtr cur) {
if ((buf == NULL) || (cur == NULL))
return;
xmlBufferWriteCHAR(buf, cur->name);
if (cur->next == NULL)
xmlBufferWriteChar(buf, ")");
else {
xmlBufferWriteChar(buf, " | ");
xmlDumpEnumeration(buf, cur->next);
}
} | 0 |
ncurses | 790a85dbd4a81d5f5d8dd02a44d84f01512ef443 | NOT_APPLICABLE | NOT_APPLICABLE | ClrToEOS(NCURSES_SP_DCLx NCURSES_CH_T blank)
{
int row, col;
row = SP_PARM->_cursrow;
col = SP_PARM->_curscol;
if (row < 0)
row = 0;
if (col < 0)
col = 0;
UpdateAttrs(SP_PARM, blank);
TPUTS_TRACE("clr_eos");
NCURSES_SP_NAME(tputs) (NCURSES_SP_ARGx
clr_eos,
screen_lines(SP_PARM) - row,
NCURSES_SP_NAME(_nc_outch));
while (col < screen_columns(SP_PARM))
CurScreen(SP_PARM)->_line[row].text[col++] = blank;
for (row++; row < screen_lines(SP_PARM); row++) {
for (col = 0; col < screen_columns(SP_PARM); col++)
CurScreen(SP_PARM)->_line[row].text[col] = blank;
}
} | 0 |
php-src | 7b1898183032eeabc64a086ff040af991cebcd93 | NOT_APPLICABLE | NOT_APPLICABLE | */
PHP_FUNCTION(date_isodate_set)
{
zval *object;
php_date_obj *dateobj;
long y, w, d = 1;
if (zend_parse_method_parameters(ZEND_NUM_ARGS() TSRMLS_CC, getThis(), "Oll|l", &object, date_ce_date, &y, &w, &d) == FAILURE) {
RETURN_FALSE;
}
dateobj = (php_date_obj *) zend_object_store_get_object(object TSRMLS_CC);
DATE_CHECK_INITIALIZED(dateobj->time, DateTime);
dateobj->time->y = y;
dateobj->time->m = 1;
dateobj->time->d = 1;
memset(&dateobj->time->relative, 0, sizeof(dateobj->time->relative));
dateobj->time->relative.d = timelib_daynr_from_weeknr(y, w, d);
dateobj->time->have_relative = 1;
timelib_update_ts(dateobj->time, NULL);
RETURN_ZVAL(object, 1, 0); | 0 |
Chrome | ee7579229ff7e9e5ae28bf53aea069251499d7da | NOT_APPLICABLE | NOT_APPLICABLE | error::Error GLES2DecoderImpl::HandleVertexAttribDivisorANGLE(
uint32 immediate_data_size, const cmds::VertexAttribDivisorANGLE& c) {
if (!features().angle_instanced_arrays) {
LOCAL_SET_GL_ERROR(
GL_INVALID_OPERATION,
"glVertexAttribDivisorANGLE", "function not available");
return error::kNoError;
}
GLuint index = c.index;
GLuint divisor = c.divisor;
if (index >= group_->max_vertex_attribs()) {
LOCAL_SET_GL_ERROR(
GL_INVALID_VALUE,
"glVertexAttribDivisorANGLE", "index out of range");
return error::kNoError;
}
state_.vertex_attrib_manager->SetDivisor(
index,
divisor);
glVertexAttribDivisorANGLE(index, divisor);
return error::kNoError;
}
| 0 |
/*
 * Content-handler work-horse: acquires the target Python sub-interpreter,
 * imports (or reloads) the WSGI script module under a module lock, then
 * executes the configured WSGI callable through an adapter object.
 * Returns an Apache HTTP status code.
 */
mod_wsgi | b0a149c1f5e569932325972e2e20176a42e43517 | NOT_APPLICABLE | NOT_APPLICABLE | static int wsgi_execute_script(request_rec *r)
{
    WSGIRequestConfig *config = NULL;
    InterpreterObject *interp = NULL;
    PyObject *modules = NULL;
    PyObject *module = NULL;
    const char *script = NULL;
    const char *name = NULL;
    int exists = 0;
    int status;
    /* Grab request configuration. */
    config = (WSGIRequestConfig *)ap_get_module_config(r->request_config,
                                                       &wsgi_module);
    /*
     * Acquire the desired python interpreter. Once this is done
     * it is safe to start manipulating python objects.
     */
    interp = wsgi_acquire_interpreter(config->application_group);
    if (!interp) {
        ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r,
                      "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.",
                      getpid(), config->application_group);
        return HTTP_INTERNAL_SERVER_ERROR;
    }
    /* Calculate the Python module name to be used for script. */
    if (config->handler_script && *config->handler_script)
        script = config->handler_script;
    else
        script = r->filename;
    name = wsgi_module_name(r->pool, script);
    /*
     * Use a lock around the check to see if the module is
     * already loaded and the import of the module to prevent
     * two request handlers trying to import the module at the
     * same time.
     */
#if APR_HAS_THREADS
    /* Release the GIL while blocking on the lock so other threads run. */
    Py_BEGIN_ALLOW_THREADS
    apr_thread_mutex_lock(wsgi_module_lock);
    Py_END_ALLOW_THREADS
#endif
    /* sys.modules is borrowed; take our own reference to the module. */
    modules = PyImport_GetModuleDict();
    module = PyDict_GetItemString(modules, name);
    Py_XINCREF(module);
    /* Remember whether the module had been imported before; the flag is
       forwarded to wsgi_load_source() below. */
    if (module)
        exists = 1;
    /*
     * If script reloading is enabled and the module for it has
     * previously been loaded, see if it has been modified since
     * the last time it was accessed. For a handler script will
     * also see if it contains a custom function for determining
     * if a reload should be performed.
     */
    if (module && config->script_reloading) {
        if (wsgi_reload_required(r->pool, r, script, module, r->filename)) {
            /*
             * Script file has changed. Discard reference to
             * loaded module and work out what action we are
             * supposed to take. Choices are process reloading
             * and module reloading. Process reloading cannot be
             * performed unless a daemon process is being used.
             */
            Py_DECREF(module);
            module = NULL;
#if defined(MOD_WSGI_WITH_DAEMONS)
            if (*config->process_group) {
                /*
                 * Need to restart the daemon process. We bail
                 * out on the request process here, sending back
                 * a special response header indicating that
                 * process is being restarted and that remote
                 * end should abandon connection and attempt to
                 * reconnect again. We also need to signal this
                 * process so it will actually shutdown. The
                 * process supervisor code will ensure that it
                 * is restarted.
                 */
                Py_BEGIN_ALLOW_THREADS
                ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r,
                             "mod_wsgi (pid=%d): Force restart of "
                             "process '%s'.", getpid(),
                             config->process_group);
                Py_END_ALLOW_THREADS
#if APR_HAS_THREADS
                apr_thread_mutex_unlock(wsgi_module_lock);
#endif
                wsgi_release_interpreter(interp);
                r->status = HTTP_INTERNAL_SERVER_ERROR;
                /* "200 Rejected" is the special marker status line the
                   remote end watches for (see comment above). */
                r->status_line = "200 Rejected";
                wsgi_daemon_shutdown++;
                kill(getpid(), SIGINT);
                return OK;
            }
            else {
                /*
                 * Need to reload just the script module. Remove
                 * the module from the modules dictionary before
                 * reloading it again. If code is executing
                 * within the module at the time, the callers
                 * reference count on the module should ensure
                 * it isn't actually destroyed until it is
                 * finished.
                 */
                PyDict_DelItemString(modules, name);
            }
#else
            /*
             * Need to reload just the script module. Remove
             * the module from the modules dictionary before
             * reloading it again. If code is executing
             * within the module at the time, the callers
             * reference count on the module should ensure
             * it isn't actually destroyed until it is
             * finished.
             */
            PyDict_DelItemString(modules, name);
#endif
        }
    }
    /*
     * When process reloading is in use need to indicate
     * that request content should now be sent through.
     * This is done by writing a special response header
     * directly out onto the appropriate network output
     * filter. The special response is picked up by
     * remote end and data will then be sent.
     */
#if defined(MOD_WSGI_WITH_DAEMONS)
    if (*config->process_group) {
        ap_filter_t *filters;
        apr_bucket_brigade *bb;
        apr_bucket *b;
        const char *data = "Status: 200 Continue\r\n\r\n";
        int length = strlen(data);
        /* Walk down to the network-level output filter, bypassing any
           intermediate filters in the chain. */
        filters = r->output_filters;
        while (filters && filters->frec->ftype != AP_FTYPE_NETWORK) {
            filters = filters->next;
        }
        bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
        b = apr_bucket_transient_create(data, length,
                                        r->connection->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);
        /* Flush bucket forces the marker out to the client immediately. */
        b = apr_bucket_flush_create(r->connection->bucket_alloc);
        APR_BRIGADE_INSERT_TAIL(bb, b);
        /*
         * This should always work, so ignore any errors
         * from passing the brigade to the network
         * output filter. If there are problems they
         * will be picked up further down in processing
         * anyway.
         */
        ap_pass_brigade(filters, bb);
    }
#endif
    /* Load module if not already loaded. */
    if (!module) {
        module = wsgi_load_source(r->pool, r, name, exists, script,
                                  config->process_group,
                                  config->application_group);
    }
    /* Safe now to release the module lock. */
#if APR_HAS_THREADS
    apr_thread_mutex_unlock(wsgi_module_lock);
#endif
    /* Assume an internal server error unless everything okay. */
    status = HTTP_INTERNAL_SERVER_ERROR;
    /* Determine if script exists and execute it. */
    if (module) {
        PyObject *module_dict = NULL;
        PyObject *object = NULL;
        module_dict = PyModule_GetDict(module);
        object = PyDict_GetItemString(module_dict, config->callable_object);
        if (object) {
            AdapterObject *adapter = NULL;
            adapter = newAdapterObject(r);
            if (adapter) {
                PyObject *method = NULL;
                PyObject *args = NULL;
                /* Hold our own reference on the callable while it runs. */
                Py_INCREF(object);
                status = Adapter_run(adapter, object);
                Py_DECREF(object);
                /*
                 * Wipe out references to Apache request object
                 * held by Python objects, so can detect when an
                 * application holds on to the transient Python
                 * objects beyond the life of the request and
                 * thus raise an exception if they are used.
                 */
                adapter->r = NULL;
                adapter->input->r = NULL;
                /* Close the log object so data is flushed. */
                method = PyObject_GetAttrString(adapter->log, "close");
                if (!method) {
                    PyErr_Format(PyExc_AttributeError,
                                 "'%s' object has no attribute 'close'",
                                 adapter->log->ob_type->tp_name);
                }
                else {
                    args = PyTuple_New(0);
                    object = PyEval_CallObject(method, args);
                    Py_DECREF(args);
                }
                Py_XDECREF(object);
                Py_XDECREF(method);
#if defined(MOD_WSGI_WITH_BUCKETS)
                adapter->bb = NULL;
#endif
            }
            Py_XDECREF((PyObject *)adapter);
        }
        else {
            Py_BEGIN_ALLOW_THREADS
            ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r,
                          "mod_wsgi (pid=%d): Target WSGI script '%s' does "
                          "not contain WSGI application '%s'.",
                          getpid(), script, config->callable_object);
            Py_END_ALLOW_THREADS
            status = HTTP_NOT_FOUND;
        }
    }
    /* Log any details of exceptions if execution failed. */
    if (PyErr_Occurred())
        wsgi_log_python_error(r, NULL, r->filename);
    /* Cleanup and release interpreter. */
    Py_XDECREF(module);
    wsgi_release_interpreter(interp);
    return status;
}
/*
 * Remove every configured CAN gateway job in the given network namespace:
 * unlink each job from the namespace list, unregister its CAN frame
 * filter, then return the job object to the slab cache. Caller must hold
 * the RTNL lock (checked via ASSERT_RTNL below).
 */
linux | 0aaa81377c5a01f686bcdb8c7a6929a7bf330c68 | NOT_APPLICABLE | NOT_APPLICABLE | static void cgw_remove_all_jobs(struct net *net)
{
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;
	ASSERT_RTNL();
	/* _safe variant: entries are deleted while walking the list. */
	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
		hlist_del(&gwj->list);
		cgw_unregister_filter(net, gwj);
		kmem_cache_free(cgw_cache, gwj);
	}
} | 0
//
// M_LoadDefaults
// Resets every entry in defaults[] to its built-in value, then reads the
// configuration file (basedefault, or the file named by the -config
// command line parameter) to override them. Finally, entries flagged
// scantranslate are converted from raw scancodes via the scantokey table.
//
doom-vanille | 8a6d9a02fa991a91ff90ccdc73b5ceabaa6cb9ec | NOT_APPLICABLE | NOT_APPLICABLE | void M_LoadDefaults (void)
{
    int		i;
    int		len;
    FILE*	f;
    char	def[80];
    char	strparm[100];
    char*	newstring;
    int		parm;
    boolean	isstring;
    // set everything to base values
    numdefaults = sizeof(defaults)/sizeof(defaults[0]);
    for (i=0 ; i<numdefaults ; i++)
        *defaults[i].location = defaults[i].defaultvalue;
    // check for a custom default file
    i = M_CheckParm ("-config");
    if (i && i<myargc-1)
    {
        defaultfile = myargv[i+1];
        printf (" default file: %s\n",defaultfile);
    }
    else
        defaultfile = basedefault;
    // read the file in, overriding any set defaults
    f = fopen (defaultfile, "r");
    if (f)
    {
        while (!feof(f))
        {
            isstring = false;
            // bounded scan: name up to 79 chars, value up to 99 chars
            if (fscanf (f, "%79s %99[^\n]\n", def, strparm) == 2)
            {
                if (strparm[0] == '"')
                {
                    // get a string default
                    isstring = true;
                    // len bytes suffice: value minus both quote characters
                    // plus the NUL terminator is at most len-1 bytes
                    len = strlen(strparm);
                    newstring = (char *) malloc(len);
                    strparm[len-1] = 0;
                    strcpy(newstring, strparm+1);
                }
                else if (strparm[0] == '0' && strparm[1] == 'x')
                    sscanf(strparm+2, "%x", &parm);
                else
                    sscanf(strparm, "%i", &parm);
                // NOTE(review): if def matches no known default below, a
                // string value allocated above is leaked
                for (i=0 ; i<numdefaults ; i++)
                    if (!strcmp(def, defaults[i].name))
                    {
                        if (!isstring)
                            *defaults[i].location = parm;
                        else
                            // NOTE(review): stores a heap pointer in an
                            // int-typed slot — truncates on platforms
                            // where int is narrower than a pointer
                            *defaults[i].location =
                                (int) newstring;
                        break;
                    }
            }
        }
        fclose (f);
    }
    // translate key-binding defaults from scancodes, remembering the
    // original scancode in 'untranslated'
    for (i = 0; i < numdefaults; i++)
    {
        if (defaults[i].scantranslate)
        {
            parm = *defaults[i].location;
            defaults[i].untranslated = parm;
            *defaults[i].location = scantokey[parm];
        }
    }
} | 0
// V8 setter for the TestInterfaceNode.eventHandlerAttribute IDL attribute:
// resolves the implementation object from the holder, converts the incoming
// JS value to an EventListener via V8EventListenerList, and installs it on
// the underlying C++ node.
Chrome | 7a6d81a8544c595331b50a17f8c0b11feca2f029 | NOT_APPLICABLE | NOT_APPLICABLE | static void eventHandlerAttributeAttributeSetter(v8::Local<v8::Value> v8Value, const v8::FunctionCallbackInfo<v8::Value>& info)
{
    v8::Local<v8::Object> holder = info.Holder();
    TestInterfaceNode* impl = V8TestInterfaceNode::toImpl(holder);
    // ListenerFindOrCreate reuses an existing listener wrapper for this JS
    // value or creates one. NOTE(review): the |true| flag presumably selects
    // attribute-style listener semantics — confirm against
    // V8EventListenerList::getEventListener.
    impl->setEventHandlerAttribute(V8EventListenerList::getEventListener(ScriptState::current(info.GetIsolate()), v8Value, true, ListenerFindOrCreate));
}
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.