project (string, 791 classes) | commit_id (string, 6-81 chars) | CVE ID (string, 13-16 chars) | CWE ID (string, 127 classes) | func (string, 5-484k chars) | vul (int8: 0 or 1) |
---|---|---|---|---|---|
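Each row below pairs a function body (`func`) with its project, fixing commit, CVE/CWE identifiers, and a `vul` label; in the rows shown, `vul` = 1 marks the pre-fix (vulnerable) version and carries concrete CVE/CWE IDs, while `vul` = 0 rows are marked NOT_APPLICABLE. A minimal sketch of how a table with this schema could be loaded and filtered is given here; the file name `vuln_functions.parquet` and the use of pandas are assumptions for illustration, not part of the dataset itself.

```python
# Minimal sketch (not part of the dataset): load a table with this schema and
# separate vulnerable from non-vulnerable functions.
# "vuln_functions.parquet" is an assumed placeholder file name.
import pandas as pd

df = pd.read_parquet("vuln_functions.parquet")

# Label balance: vul == 1 rows carry a concrete CVE ID / CWE ID,
# vul == 0 rows are marked NOT_APPLICABLE.
print(df["vul"].value_counts())

# Collect the labelled-vulnerable samples together with their identifiers.
vulnerable = df.loc[df["vul"] == 1, ["project", "commit_id", "CVE ID", "CWE ID"]]
print(vulnerable.head())

# func lengths range from a few bytes up to ~484k characters, so inspect the
# distribution before choosing a tokenizer truncation length.
print(df["func"].str.len().describe())
```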
libtasn1 | 154909136c12cfa5c60732b7210827dfb1ec6aee | NOT_APPLICABLE | NOT_APPLICABLE | _asn1_get_octet_string (const unsigned char *der, asn1_node node, int *len)
{
int len2, len3, counter, tot_len, indefinite;
counter = 0;
if (*(der - 1) & ASN1_CLASS_STRUCTURED)
{
tot_len = 0;
indefinite = asn1_get_length_der (der, *len, &len3);
if (indefinite < -1)
return ASN1_DER_ERROR;
counter += len3;
if (indefinite >= 0)
indefinite += len3;
while (1)
{
if (counter > (*len))
return ASN1_DER_ERROR;
if (indefinite == -1)
{
if ((der[counter] == 0) && (der[counter + 1] == 0))
{
counter += 2;
break;
}
}
else if (counter >= indefinite)
break;
if (der[counter] != ASN1_TAG_OCTET_STRING)
return ASN1_DER_ERROR;
counter++;
len2 = asn1_get_length_der (der + counter, *len - counter, &len3);
if (len2 <= 0)
return ASN1_DER_ERROR;
counter += len3 + len2;
tot_len += len2;
}
/* copy */
if (node)
{
unsigned char temp[ASN1_MAX_LENGTH_SIZE];
int ret;
len2 = sizeof (temp);
asn1_length_der (tot_len, temp, &len2);
_asn1_set_value (node, temp, len2);
ret = _asn1_extract_der_octet (node, der, *len);
if (ret != ASN1_SUCCESS)
return ret;
}
}
else
{ /* NOT STRUCTURED */
len2 = asn1_get_length_der (der, *len, &len3);
if (len2 < 0)
return ASN1_DER_ERROR;
counter = len3 + len2;
if (node)
_asn1_set_value (node, der, counter);
}
*len = counter;
return ASN1_SUCCESS;
} | 0 |
openldap | 9badb73425a67768c09bcaed1a9c26c684af6c30 | NOT_APPLICABLE | NOT_APPLICABLE | serialNumberAndIssuerSerialCheck(
struct berval *in,
struct berval *sn,
struct berval *is,
struct berval *i_sn, /* contain serial of baseCertificateID */
void *ctx )
{
/* Parse GSER format */
enum {
HAVE_NONE = 0x0,
HAVE_SN = 0x1,
HAVE_ISSUER = 0x2,
HAVE_ALL = ( HAVE_SN | HAVE_ISSUER )
} have = HAVE_NONE, have2 = HAVE_NONE;
int numdquotes = 0;
struct berval x = *in;
struct berval ni;
if ( in->bv_len < 3 ) return LDAP_INVALID_SYNTAX;
/* no old format */
if ( in->bv_val[0] != '{' || in->bv_val[in->bv_len-1] != '}' ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len -= 2;
do {
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
/* should be at issuer or serialNumber NamedValue */
if ( strncasecmp( x.bv_val, "issuer", STRLENOF("issuer") ) == 0 ) {
if ( have & HAVE_ISSUER ) {
return LDAP_INVALID_SYNTAX;
}
/* parse IssuerSerial */
x.bv_val += STRLENOF("issuer");
x.bv_len -= STRLENOF("issuer");
if ( x.bv_val[0] != ' ' ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len--;
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
if ( x.bv_val[0] != '{' /*}*/ ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len--;
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
if ( strncasecmp( x.bv_val, "baseCertificateID ", STRLENOF("baseCertificateID ") ) != 0 ) {
return LDAP_INVALID_SYNTAX;
}
x.bv_val += STRLENOF("baseCertificateID ");
x.bv_len -= STRLENOF("baseCertificateID ");
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
if ( x.bv_val[0] != '{' /*}*/ ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len--;
do {
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
/* parse issuer of baseCertificateID */
if ( strncasecmp( x.bv_val, "issuer ", STRLENOF("issuer ") ) == 0 ) {
if ( have2 & HAVE_ISSUER ) {
return LDAP_INVALID_SYNTAX;
}
x.bv_val += STRLENOF("issuer ");
x.bv_len -= STRLENOF("issuer ");
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
if ( x.bv_val[0] != '{' /*}*/ ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len--;
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
if ( strncasecmp( x.bv_val, "directoryName:rdnSequence:", STRLENOF("directoryName:rdnSequence:") ) != 0 ) {
return LDAP_INVALID_SYNTAX;
}
x.bv_val += STRLENOF("directoryName:rdnSequence:");
x.bv_len -= STRLENOF("directoryName:rdnSequence:");
if ( x.bv_val[0] != '"' ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len--;
is->bv_val = x.bv_val;
is->bv_len = 0;
for ( ; is->bv_len < x.bv_len; ) {
if ( is->bv_val[is->bv_len] != '"' ) {
is->bv_len++;
continue;
}
if ( is->bv_val[is->bv_len + 1] == '"' ) {
/* double dquote */
numdquotes++;
is->bv_len += 2;
continue;
}
break;
}
x.bv_val += is->bv_len + 1;
x.bv_len -= is->bv_len + 1;
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
if ( x.bv_val[0] != /*{*/ '}' ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len--;
have2 |= HAVE_ISSUER;
} else if ( strncasecmp( x.bv_val, "serial ", STRLENOF("serial ") ) == 0 ) {
if ( have2 & HAVE_SN ) {
return LDAP_INVALID_SYNTAX;
}
x.bv_val += STRLENOF("serial ");
x.bv_len -= STRLENOF("serial ");
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len--) {
/* empty */;
}
if ( checkNum( &x, i_sn ) ) {
return LDAP_INVALID_SYNTAX;
}
x.bv_val += i_sn->bv_len;
x.bv_len -= i_sn->bv_len;
have2 |= HAVE_SN;
} else {
return LDAP_INVALID_SYNTAX;
}
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
if ( have2 == HAVE_ALL ) {
break;
}
if ( x.bv_val[0] != ',' ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len--;
} while ( 1 );
if ( x.bv_val[0] != /*{*/ '}' ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len--;
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
if ( x.bv_val[0] != /*{*/ '}' ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len--;
have |= HAVE_ISSUER;
} else if ( strncasecmp( x.bv_val, "serialNumber", STRLENOF("serialNumber") ) == 0 ) {
if ( have & HAVE_SN ) {
return LDAP_INVALID_SYNTAX;
}
/* parse serialNumber */
x.bv_val += STRLENOF("serialNumber");
x.bv_len -= STRLENOF("serialNumber");
if ( x.bv_val[0] != ' ' ) return LDAP_INVALID_SYNTAX;
x.bv_val++;
x.bv_len--;
/* eat leading spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
if ( checkNum( &x, sn ) ) {
return LDAP_INVALID_SYNTAX;
}
x.bv_val += sn->bv_len;
x.bv_len -= sn->bv_len;
have |= HAVE_SN;
} else {
return LDAP_INVALID_SYNTAX;
}
/* eat spaces */
for ( ; (x.bv_val[0] == ' ') && x.bv_len; x.bv_val++, x.bv_len-- ) {
/* empty */;
}
if ( have == HAVE_ALL ) {
break;
}
if ( x.bv_val[0] != ',' ) {
return LDAP_INVALID_SYNTAX;
}
x.bv_val++ ;
x.bv_len--;
} while ( 1 );
/* should have no characters left... */
if( x.bv_len ) return LDAP_INVALID_SYNTAX;
if ( numdquotes == 0 ) {
ber_dupbv_x( &ni, is, ctx );
} else {
ber_len_t src, dst;
ni.bv_len = is->bv_len - numdquotes;
ni.bv_val = ber_memalloc_x( ni.bv_len + 1, ctx );
for ( src = 0, dst = 0; src < is->bv_len; src++, dst++ ) {
if ( is->bv_val[src] == '"' ) {
src++;
}
ni.bv_val[dst] = is->bv_val[src];
}
ni.bv_val[dst] = '\0';
}
*is = ni;
/* need to handle double dquotes here */
return 0;
} | 0 |
radare2 | 04edfa82c1f3fa2bc3621ccdad2f93bdbf00e4f9 | NOT_APPLICABLE | NOT_APPLICABLE | void init_pdb_downloader(SPDBDownloaderOpt *opt, SPDBDownloader *pd) {
pd->opt = R_NEW0 (SPDBDownloaderOpt);
if (!pd->opt) {
pd->download = 0;
eprintf ("Cannot allocate memory for SPDBDownloaderOpt.\n");
return;
}
pd->opt->dbg_file = strdup (opt->dbg_file);
pd->opt->guid = strdup (opt->guid);
pd->opt->symbol_server = strdup (opt->symbol_server);
pd->opt->user_agent = strdup (opt->user_agent);
pd->opt->symbol_store_path = strdup (opt->symbol_store_path);
pd->opt->extract = opt->extract;
pd->download = download;
} | 0 |
collectd | f6be4f9b49b949b379326c3d7002476e6ce4f211 | NOT_APPLICABLE | NOT_APPLICABLE | static int network_init_gcrypt(void) /* {{{ */
{
gcry_error_t err;
/* http://lists.gnupg.org/pipermail/gcrypt-devel/2003-August/000458.html
* Because you can't know in a library whether another library has
* already initialized the library */
if (gcry_control(GCRYCTL_ANY_INITIALIZATION_P))
return (0);
/* http://www.gnupg.org/documentation/manuals/gcrypt/Multi_002dThreading.html
* To ensure thread-safety, it's important to set GCRYCTL_SET_THREAD_CBS
* *before* initalizing Libgcrypt with gcry_check_version(), which itself must
* be called before any other gcry_* function. GCRYCTL_ANY_INITIALIZATION_P
* above doesn't count, as it doesn't implicitly initalize Libgcrypt.
*
* tl;dr: keep all these gry_* statements in this exact order please. */
#if GCRYPT_VERSION_NUMBER < 0x010600
err = gcry_control(GCRYCTL_SET_THREAD_CBS, &gcry_threads_pthread);
if (err) {
ERROR("network plugin: gcry_control (GCRYCTL_SET_THREAD_CBS) failed: %s",
gcry_strerror(err));
return (-1);
}
#endif
gcry_check_version(NULL);
err = gcry_control(GCRYCTL_INIT_SECMEM, 32768);
if (err) {
ERROR("network plugin: gcry_control (GCRYCTL_INIT_SECMEM) failed: %s",
gcry_strerror(err));
return (-1);
}
gcry_control(GCRYCTL_INITIALIZATION_FINISHED);
return (0);
} /* }}} int network_init_gcrypt */ | 0 |
libarchive | 15bf44fd2c1ad0e3fd87048b3fcc90c4dcff1175 | NOT_APPLICABLE | NOT_APPLICABLE | isint_w(const wchar_t *start, const wchar_t *end, int *result)
{
int n = 0;
if (start >= end)
return (0);
while (start < end) {
if (*start < '0' || *start > '9')
return (0);
if (n > (INT_MAX / 10) ||
(n == INT_MAX / 10 && (*start - '0') > INT_MAX % 10)) {
n = INT_MAX;
} else {
n *= 10;
n += *start - '0';
}
start++;
}
*result = n;
return (1);
}
| 0 |
Chrome | 2f81d000fdb5331121cba7ff81dfaaec25b520a5 | NOT_APPLICABLE | NOT_APPLICABLE | void DownloadResourceHandler::OnReadyToRead() {
DCHECK_CURRENTLY_ON(BrowserThread::IO);
Resume();
}
| 0 |
linux | 415e3d3e90ce9e18727e8843ae343eda5a58fad6 | NOT_APPLICABLE | NOT_APPLICABLE | static int unix_stream_read_generic(struct unix_stream_read_state *state)
{
struct scm_cookie scm;
struct socket *sock = state->socket;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
int copied = 0;
int flags = state->flags;
int noblock = flags & MSG_DONTWAIT;
bool check_creds = false;
int target;
int err = 0;
long timeo;
int skip;
size_t size = state->size;
unsigned int last_len;
err = -EINVAL;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
err = -EOPNOTSUPP;
if (flags & MSG_OOB)
goto out;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, noblock);
memset(&scm, 0, sizeof(scm));
/* Lock the socket to prevent queue disordering
* while sleeps in memcpy_tomsg
*/
mutex_lock(&u->readlock);
if (flags & MSG_PEEK)
skip = sk_peek_offset(sk, flags);
else
skip = 0;
do {
int chunk;
bool drop_skb;
struct sk_buff *skb, *last;
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD)) {
err = -ECONNRESET;
goto unlock;
}
last = skb = skb_peek(&sk->sk_receive_queue);
last_len = last ? last->len : 0;
again:
if (skb == NULL) {
unix_sk(sk)->recursion_level = 0;
if (copied >= target)
goto unlock;
/*
* POSIX 1003.1g mandates this order.
*/
err = sock_error(sk);
if (err)
goto unlock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
goto unlock;
unix_state_unlock(sk);
err = -EAGAIN;
if (!timeo)
break;
mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo, last,
last_len);
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
scm_destroy(&scm);
goto out;
}
mutex_lock(&u->readlock);
continue;
unlock:
unix_state_unlock(sk);
break;
}
while (skip >= unix_skb_len(skb)) {
skip -= unix_skb_len(skb);
last = skb;
last_len = skb->len;
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (!skb)
goto again;
}
unix_state_unlock(sk);
if (check_creds) {
/* Never glue messages from different writers */
if (!unix_skb_scm_eq(skb, &scm))
break;
} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
/* Copy credentials */
scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
unix_set_secdata(&scm, skb);
check_creds = true;
}
/* Copy address just once */
if (state->msg && state->msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
state->msg->msg_name);
unix_copy_addr(state->msg, skb->sk);
sunaddr = NULL;
}
chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
skb_get(skb);
chunk = state->recv_actor(skb, skip, chunk, state);
drop_skb = !unix_skb_len(skb);
/* skb is only safe to use if !drop_skb */
consume_skb(skb);
if (chunk < 0) {
if (copied == 0)
copied = -EFAULT;
break;
}
copied += chunk;
size -= chunk;
if (drop_skb) {
/* the skb was touched by a concurrent reader;
* we should not expect anything from this skb
* anymore and assume it invalid - we can be
* sure it was dropped from the socket queue
*
* let's report a short read
*/
err = 0;
break;
}
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
UNIXCB(skb).consumed += chunk;
sk_peek_offset_bwd(sk, chunk);
if (UNIXCB(skb).fp)
unix_detach_fds(&scm, skb);
if (unix_skb_len(skb))
break;
skb_unlink(skb, &sk->sk_receive_queue);
consume_skb(skb);
if (scm.fp)
break;
} else {
/* It is questionable, see note in unix_dgram_recvmsg.
*/
if (UNIXCB(skb).fp)
scm.fp = scm_fp_dup(UNIXCB(skb).fp);
sk_peek_offset_fwd(sk, chunk);
if (UNIXCB(skb).fp)
break;
skip = 0;
last = skb;
last_len = skb->len;
unix_state_lock(sk);
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (skb)
goto again;
unix_state_unlock(sk);
break;
}
} while (size);
mutex_unlock(&u->readlock);
if (state->msg)
scm_recv(sock, state->msg, &scm, flags);
else
scm_destroy(&scm);
out:
return copied ? : err;
}
| 0 |
linux | c50ac050811d6485616a193eb0f37bfbd191cc89 | NOT_APPLICABLE | NOT_APPLICABLE | static void hugetlb_unregister_all_nodes(void)
{
int nid;
/*
* disable node device registrations.
*/
register_hugetlbfs_with_node(NULL, NULL);
/*
* remove hstate attributes from any nodes that have them.
*/
for (nid = 0; nid < nr_node_ids; nid++)
hugetlb_unregister_node(&node_devices[nid]);
}
| 0 |
linux | 17d68b763f09a9ce824ae23eb62c9efc57b69271 | NOT_APPLICABLE | NOT_APPLICABLE | static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
apic_set_reg(apic, APIC_TASKPRI, tpr);
apic_update_ppr(apic);
}
| 0 |
gst-plugins-good | 02174790726dd20a5c73ce2002189bf240ad4fe0 | NOT_APPLICABLE | NOT_APPLICABLE | gst_matroska_demux_parse_contents (GstMatroskaDemux * demux, GstEbmlRead * ebml)
{
GstFlowReturn ret = GST_FLOW_OK;
guint32 id;
DEBUG_ELEMENT_START (demux, ebml, "SeekHead");
if ((ret = gst_ebml_read_master (ebml, &id)) != GST_FLOW_OK) {
DEBUG_ELEMENT_STOP (demux, ebml, "SeekHead", ret);
return ret;
}
while (ret == GST_FLOW_OK && gst_ebml_read_has_remaining (ebml, 1, TRUE)) {
if ((ret = gst_ebml_peek_id (ebml, &id)) != GST_FLOW_OK)
break;
switch (id) {
case GST_MATROSKA_ID_SEEKENTRY:
{
ret = gst_matroska_demux_parse_contents_seekentry (demux, ebml);
/* Ignore EOS and errors here */
if (ret != GST_FLOW_OK) {
GST_DEBUG_OBJECT (demux, "Ignoring %s", gst_flow_get_name (ret));
ret = GST_FLOW_OK;
}
break;
}
default:
ret = gst_matroska_read_common_parse_skip (&demux->common,
ebml, "SeekHead", id);
break;
}
}
DEBUG_ELEMENT_STOP (demux, ebml, "SeekHead", ret);
/* Sort clusters by position for easier searching */
if (demux->clusters)
g_array_sort (demux->clusters, (GCompareFunc) gst_matroska_cluster_compare);
return ret;
} | 0 |
launchpad | 29014da83e5fc358d6bff0f574e9ed45e61a35ac | NOT_APPLICABLE | NOT_APPLICABLE | void OxideQQuickWebViewPrivate::attachContextSignals(
OxideQQuickWebContextPrivate* context) {
Q_Q(OxideQQuickWebView);
if (!context) {
return;
}
QObject::connect(context, SIGNAL(destroyed()),
q, SLOT(contextDestroyed()));
QObject::connect(context, SIGNAL(constructed()),
q, SLOT(contextConstructed()));
}
| 0 |
linux | 715230a44310a8cf66fbfb5a46f9a62a9b2de424 | NOT_APPLICABLE | NOT_APPLICABLE | static void tg3_carrier_off(struct tg3 *tp)
{
netif_carrier_off(tp->dev);
tp->link_up = false;
}
| 0 |
radare2 | d1e8ac62c6d978d4662f69116e30230d43033c92 | NOT_APPLICABLE | NOT_APPLICABLE | static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) {
int i, j, sym, wordsize;
ut32 stype;
wordsize = MACH0_(get_bits)(bin) / 8;
if (idx < 0 || idx >= bin->nsymtab) {
return 0;
}
if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) {
stype = S_LAZY_SYMBOL_POINTERS;
} else {
stype = S_NON_LAZY_SYMBOL_POINTERS;
}
reloc->offset = 0;
reloc->addr = 0;
reloc->addend = 0;
#define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break
switch (wordsize) {
CASE(8);
CASE(16);
CASE(32);
CASE(64);
default: return false;
}
#undef CASE
for (i = 0; i < bin->nsects; i++) {
if ((bin->sects[i].flags & SECTION_TYPE) == stype) {
for (j=0, sym=-1; bin->sects[i].reserved1+j < bin->nindirectsyms; j++)
if (idx == bin->indirectsyms[bin->sects[i].reserved1 + j]) {
sym = j;
break;
}
reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize;
reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize;
return true;
}
}
return false;
}
| 0 |
gpac | 6063b1a011c3f80cee25daade18154e15e4c058c | NOT_APPLICABLE | NOT_APPLICABLE |
GF_Err tfdt_Write(GF_Box *s, GF_BitStream *bs)
{
GF_Err e;
GF_TFBaseMediaDecodeTimeBox *ptr = (GF_TFBaseMediaDecodeTimeBox *) s;
e = gf_isom_full_box_write(s, bs);
if (e) return e;
if (ptr->version==1) {
gf_bs_write_u64(bs, ptr->baseMediaDecodeTime);
} else {
gf_bs_write_u32(bs, (u32) ptr->baseMediaDecodeTime);
}
return GF_OK; | 0 |
linux | 04c4f2ee3f68c9a4bf1653d15f1a9a435ae33f7a | NOT_APPLICABLE | NOT_APPLICABLE | static int handle_invpcid(struct kvm_vcpu *vcpu)
{
u32 vmx_instruction_info;
unsigned long type;
gva_t gva;
struct {
u64 pcid;
u64 gla;
} operand;
if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
if (type > 3) {
kvm_inject_gp(vcpu, 0);
return 1;
}
/* According to the Intel instruction reference, the memory operand
* is read even if it isn't needed (e.g., for type==all)
*/
if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmx_instruction_info, false,
sizeof(operand), &gva))
return 1;
return kvm_handle_invpcid(vcpu, type, gva);
} | 0 |
openssl | e9bbefbf0f24c57645e7ad6a5a71ae649d18ac8e | NOT_APPLICABLE | NOT_APPLICABLE | static int dtls1_record_replay_check(SSL *s, DTLS1_BITMAP *bitmap)
{
int cmp;
unsigned int shift;
const unsigned char *seq = s->s3->read_sequence;
cmp = satsub64be(seq, bitmap->max_seq_num);
if (cmp > 0) {
memcpy(s->s3->rrec.seq_num, seq, 8);
return 1; /* this record in new */
}
shift = -cmp;
if (shift >= sizeof(bitmap->map) * 8)
return 0; /* stale, outside the window */
else if (bitmap->map & (1UL << shift))
return 0; /* record previously received */
memcpy(s->s3->rrec.seq_num, seq, 8);
return 1;
}
| 0 |
vim | a6f9e300161f4cb54713da22f65b261595e8e614 | NOT_APPLICABLE | NOT_APPLICABLE | is_first_match(compl_T *match)
{
return match == compl_first_match;
} | 0 |
hhvm | 08193b7f0cd3910256e00d599f0f3eb2519c44ca | NOT_APPLICABLE | NOT_APPLICABLE | int preg_replace(Variant& result,
const Variant& pattern,
const Variant& replacement,
const Variant& subject,
int limit /* = -1 */) {
int64_t count;
result = preg_replace_impl(pattern, replacement, subject,
limit, &count, false, false);
return count;
} | 0 |
linux | bd97120fc3d1a11f3124c7c9ba1d91f51829eb85 | NOT_APPLICABLE | NOT_APPLICABLE | int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
struct vring_used_elem __user *used;
/* The virtqueue contains a ring of used buffers. Get a pointer to the
* next entry in that used ring. */
used = &vq->used->ring[vq->last_used_idx % vq->num];
if (__put_user(head, &used->id)) {
vq_err(vq, "Failed to write used id");
return -EFAULT;
}
if (__put_user(len, &used->len)) {
vq_err(vq, "Failed to write used len");
return -EFAULT;
}
/* Make sure buffer is written before we update index. */
smp_wmb();
if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
if (unlikely(vq->log_used)) {
/* Make sure data is seen before log. */
smp_wmb();
/* Log used ring entry write. */
log_write(vq->log_base,
vq->log_addr +
((void __user *)used - (void __user *)vq->used),
sizeof *used);
/* Log used index update. */
log_write(vq->log_base,
vq->log_addr + offsetof(struct vring_used, idx),
sizeof vq->used->idx);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
vq->last_used_idx++;
/* If the driver never bothers to signal in a very long while,
* used index might wrap around. If that happens, invalidate
* signalled_used index we stored. TODO: make sure driver
* signals at least once in 2^16 and remove this. */
if (unlikely(vq->last_used_idx == vq->signalled_used))
vq->signalled_used_valid = false;
return 0;
}
| 0 |
Chrome | 8ea3a5c06218fa42d25c3aa0a4ab57153e178523 | NOT_APPLICABLE | NOT_APPLICABLE | void ChromeClientImpl::setCursor(const WebCursorInfo& cursor)
{
#if OS(MACOSX)
if (m_webView->hasOpenedPopup())
return;
#endif
if (m_webView->client())
m_webView->client()->didChangeCursor(cursor);
}
| 0 |
Chrome | fea16c8b60ff3d0756d5eb392394963b647bc41a | NOT_APPLICABLE | NOT_APPLICABLE | ContentSecurityPolicy::~ContentSecurityPolicy() {}
| 0 |
radare2 | 9b46d38dd3c4de6048a488b655c7319f845af185 | NOT_APPLICABLE | NOT_APPLICABLE | static int oplldt(RAsm *a, ut8 *data, const Opcode *op) {
int l = 0;
switch (op->operands_count) {
case 1:
if ( op->operands[0].type & OT_WORD ) {
data[l++] = 0x0f;
data[l++] = 0x00;
if ( op->operands[0].type & OT_MEMORY ) {
data[l++] = 0x10 | op->operands[0].regs[0];
} else {
data[l++] = 0xd0 | op->operands[0].reg;
}
} else {
return -1;
}
break;
default:
return -1;
}
return l;
}
| 0 |
gerbv | 319a8af890e4d0a5c38e6d08f510da8eefc42537 | NOT_APPLICABLE | NOT_APPLICABLE | static void drill_report(gerbv_aperture_t *apertures[], int aperture_num)
{
gerbv_aperture_type_t type = apertures[aperture_num]->type;
double *params = apertures[aperture_num]->parameter;
g_message (_(" Tool used: T%d"), aperture_num);
if (type == GERBV_APTYPE_CIRCLE)
g_message (_(" Diameter: %g %s"),
screen_units(params[0]),
screen_units_str());
} | 0 |
Android | 1f24c730ab6ca5aff1e3137b340b8aeaeda4bdbc | NOT_APPLICABLE | NOT_APPLICABLE | status_t CameraSource::configureCamera(
CameraParameters* params,
int32_t width, int32_t height,
int32_t frameRate) {
ALOGV("configureCamera");
Vector<Size> sizes;
bool isSetVideoSizeSupportedByCamera = true;
getSupportedVideoSizes(*params, &isSetVideoSizeSupportedByCamera, sizes);
bool isCameraParamChanged = false;
if (width != -1 && height != -1) {
if (!isVideoSizeSupported(width, height, sizes)) {
ALOGE("Video dimension (%dx%d) is unsupported", width, height);
return BAD_VALUE;
}
if (isSetVideoSizeSupportedByCamera) {
params->setVideoSize(width, height);
} else {
params->setPreviewSize(width, height);
}
isCameraParamChanged = true;
} else if ((width == -1 && height != -1) ||
(width != -1 && height == -1)) {
ALOGE("Requested video size (%dx%d) is not supported", width, height);
return BAD_VALUE;
} else { // width == -1 && height == -1
}
if (frameRate != -1) {
CHECK(frameRate > 0 && frameRate <= 120);
const char* supportedFrameRates =
params->get(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES);
CHECK(supportedFrameRates != NULL);
ALOGV("Supported frame rates: %s", supportedFrameRates);
char buf[4];
snprintf(buf, 4, "%d", frameRate);
if (strstr(supportedFrameRates, buf) == NULL) {
ALOGE("Requested frame rate (%d) is not supported: %s",
frameRate, supportedFrameRates);
return BAD_VALUE;
}
params->setPreviewFrameRate(frameRate);
isCameraParamChanged = true;
} else { // frameRate == -1
}
if (isCameraParamChanged) {
String8 s = params->flatten();
if (OK != mCamera->setParameters(s)) {
ALOGE("Could not change settings."
" Someone else is using camera %p?", mCamera.get());
return -EBUSY;
}
}
return OK;
}
| 0 |
qemu | b3af7fdf9cc537f8f0dd3e2423d83f5c99a457e8 | NOT_APPLICABLE | NOT_APPLICABLE | static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
int start_track, format, msf, toclen;
uint64_t nb_sectors;
msf = req->cmd.buf[1] & 2;
format = req->cmd.buf[2] & 0xf;
start_track = req->cmd.buf[6];
blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
switch (format) {
case 0:
toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
break;
case 1:
/* multi session : only a single session defined */
toclen = 12;
memset(outbuf, 0, 12);
outbuf[1] = 0x0a;
outbuf[2] = 0x01;
outbuf[3] = 0x01;
break;
case 2:
toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
break;
default:
return -1;
}
return toclen;
} | 0 |
linux | 0ddcff49b672239dda94d70d0fcf50317a9f4b51 | NOT_APPLICABLE | NOT_APPLICABLE | static void hwsim_mcast_del_radio(int id, const char *hwname,
struct genl_info *info)
{
struct sk_buff *skb;
void *data;
int ret;
skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return;
data = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0,
HWSIM_CMD_DEL_RADIO);
if (!data)
goto error;
ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id);
if (ret < 0)
goto error;
ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(hwname),
hwname);
if (ret < 0)
goto error;
genlmsg_end(skb, data);
hwsim_mcast_config_msg(skb, info);
return;
error:
nlmsg_free(skb);
}
| 0 |
php-src | cc08cbc84d46933c1e9e0149633f1ed5d19e45e9 | NOT_APPLICABLE | NOT_APPLICABLE | allocDynamic (dynamicPtr * dp, int initialSize, void *data)
{
if (data == NULL) {
dp->logicalSize = 0;
dp->dataGood = FALSE;
dp->data = gdMalloc(initialSize);
} else {
dp->logicalSize = initialSize;
dp->dataGood = TRUE;
dp->data = data;
}
dp->realSize = initialSize;
dp->dataGood = TRUE;
dp->pos = 0;
return TRUE;
} | 0 |
FFmpeg | 689e59b7ffed34eba6159dcc78e87133862e3746 | NOT_APPLICABLE | NOT_APPLICABLE | static int mov_read_ftyp(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
uint32_t minor_ver;
int comp_brand_size;
char minor_ver_str[11]; /* 32 bit integer -> 10 digits + null */
char* comp_brands_str;
uint8_t type[5] = {0};
avio_read(pb, type, 4);
if (strcmp(type, "qt "))
c->isom = 1;
av_log(c->fc, AV_LOG_DEBUG, "ISO: File Type Major Brand: %.4s\n",(char *)&type);
av_dict_set(&c->fc->metadata, "major_brand", type, 0);
minor_ver = avio_rb32(pb); /* minor version */
snprintf(minor_ver_str, sizeof(minor_ver_str), "%d", minor_ver);
av_dict_set(&c->fc->metadata, "minor_version", minor_ver_str, 0);
comp_brand_size = atom.size - 8;
if (comp_brand_size < 0)
return AVERROR_INVALIDDATA;
comp_brands_str = av_malloc(comp_brand_size + 1); /* Add null terminator */
if (!comp_brands_str)
return AVERROR(ENOMEM);
avio_read(pb, comp_brands_str, comp_brand_size);
comp_brands_str[comp_brand_size] = 0;
av_dict_set(&c->fc->metadata, "compatible_brands", comp_brands_str, 0);
av_freep(&comp_brands_str);
return 0;
}
| 0 |
gpac | bceb03fd2be95097a7b409ea59914f332fb6bc86 | NOT_APPLICABLE | NOT_APPLICABLE | GF_Err dimm_Read(GF_Box *s, GF_BitStream *bs)
{
GF_DIMMBox *ptr = (GF_DIMMBox *)s;
ptr->nbBytes = gf_bs_read_u64(bs);
return GF_OK;
}
| 0 |
linux | f8bd2258e2d520dff28c855658bd24bdafb5102d | NOT_APPLICABLE | NOT_APPLICABLE | static int count_free(struct page *page)
{
return page->objects - page->inuse;
}
| 0 |
linux | 04f25edb48c441fc278ecc154c270f16966cbb90 | NOT_APPLICABLE | NOT_APPLICABLE | static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
u8 tc;
tc = hdev->tm_info.prio_tc[pri_id];
if (tc >= hdev->tm_info.num_tc)
return -EINVAL;
/**
* the register for priority has four bytes, the first bytes includes
* priority0 and priority1, the higher 4bit stands for priority1
* while the lower 4bit stands for priority0, as below:
* first byte: | pri_1 | pri_0 |
* second byte: | pri_3 | pri_2 |
* third byte: | pri_5 | pri_4 |
* fourth byte: | pri_7 | pri_6 |
*/
pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
return 0;
} | 0 |
linux-2.6 | 649f1ee6c705aab644035a7998d7b574193a598a | CVE-2008-4934 | CWE-20 | int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max)
{
struct page *page;
struct address_space *mapping;
__be32 *pptr, *curr, *end;
u32 mask, start, len, n;
__be32 val;
int i;
len = *max;
if (!len)
return size;
dprint(DBG_BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
mutex_lock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
mapping = HFSPLUS_SB(sb).alloc_file->i_mapping;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
pptr = kmap(page);
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
i = offset % 32;
offset &= ~(PAGE_CACHE_BITS - 1);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
/* scan the first partial u32 for zero bits */
val = *curr;
if (~val) {
n = be32_to_cpu(val);
mask = (1U << 31) >> i;
for (; i < 32; mask >>= 1, i++) {
if (!(n & mask))
goto found;
}
}
curr++;
/* scan complete u32s for the first zero bit */
while (1) {
while (curr < end) {
val = *curr;
if (~val) {
n = be32_to_cpu(val);
mask = 1 << 31;
for (i = 0; i < 32; mask >>= 1, i++) {
if (!(n & mask))
goto found;
}
}
curr++;
}
kunmap(page);
offset += PAGE_CACHE_BITS;
if (offset >= size)
break;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
curr = pptr = kmap(page);
if ((size ^ offset) / PAGE_CACHE_BITS)
end = pptr + PAGE_CACHE_BITS / 32;
else
end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
}
dprint(DBG_BITMAP, "bitmap full\n");
start = size;
goto out;
found:
start = offset + (curr - pptr) * 32 + i;
if (start >= size) {
dprint(DBG_BITMAP, "bitmap full\n");
goto out;
}
/* do any partial u32 at the start */
len = min(size - start, len);
while (1) {
n |= mask;
if (++i >= 32)
break;
mask >>= 1;
if (!--len || n & mask)
goto done;
}
if (!--len)
goto done;
*curr++ = cpu_to_be32(n);
/* do full u32s */
while (1) {
while (curr < end) {
n = be32_to_cpu(*curr);
if (len < 32)
goto last;
if (n) {
len = 32;
goto last;
}
*curr++ = cpu_to_be32(0xffffffff);
len -= 32;
}
set_page_dirty(page);
kunmap(page);
offset += PAGE_CACHE_BITS;
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
NULL);
pptr = kmap(page);
curr = pptr;
end = pptr + PAGE_CACHE_BITS / 32;
}
last:
/* do any partial u32 at end */
mask = 1U << 31;
for (i = 0; i < len; i++) {
if (n & mask)
break;
n |= mask;
mask >>= 1;
}
done:
*curr = cpu_to_be32(n);
set_page_dirty(page);
kunmap(page);
*max = offset + (curr - pptr) * 32 + i - start;
HFSPLUS_SB(sb).free_blocks -= *max;
sb->s_dirt = 1;
dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
mutex_unlock(&HFSPLUS_SB(sb).alloc_file->i_mutex);
return start;
} | 1 |
src | ed8fdce754a5d8d14c09e989d8877707bd43906f | NOT_APPLICABLE | NOT_APPLICABLE | syn_cache_reaper(void *arg)
{
struct syn_cache *sc = arg;
pool_put(&syn_cache_pool, (sc));
return;
} | 0 |
tor | a0ef3cf0880e3cd343977b3fcbd0a2e7572f0cb4 | NOT_APPLICABLE | NOT_APPLICABLE | dirvote_fetch_missing_votes(void)
{
smartlist_t *missing_fps = smartlist_new();
char *resource;
SMARTLIST_FOREACH_BEGIN(router_get_trusted_dir_servers(),
dir_server_t *, ds) {
if (!(ds->type & V3_DIRINFO))
continue;
if (!dirvote_get_vote(ds->v3_identity_digest,
DGV_BY_ID|DGV_INCLUDE_PENDING)) {
char *cp = tor_malloc(HEX_DIGEST_LEN+1);
base16_encode(cp, HEX_DIGEST_LEN+1, ds->v3_identity_digest,
DIGEST_LEN);
smartlist_add(missing_fps, cp);
}
} SMARTLIST_FOREACH_END(ds);
if (!smartlist_len(missing_fps)) {
smartlist_free(missing_fps);
return;
}
{
char *tmp = smartlist_join_strings(missing_fps, " ", 0, NULL);
log_notice(LOG_NOTICE, "We're missing votes from %d authorities (%s). "
"Asking every other authority for a copy.",
smartlist_len(missing_fps), tmp);
tor_free(tmp);
}
resource = smartlist_join_strings(missing_fps, "+", 0, NULL);
directory_get_from_all_authorities(DIR_PURPOSE_FETCH_STATUS_VOTE,
0, resource);
tor_free(resource);
SMARTLIST_FOREACH(missing_fps, char *, cp, tor_free(cp));
smartlist_free(missing_fps);
} | 0 |
libsass | f2db04883e5fff4e03777dcc1eb60d4373c45be1 | NOT_APPLICABLE | NOT_APPLICABLE | Media_Query_Obj Parser::parse_media_query()
{
advanceToNextToken();
Media_Query_Obj media_query = SASS_MEMORY_NEW(Media_Query, pstate);
if (lex < kwd_not >()) { media_query->is_negated(true); lex < css_comments >(false); }
else if (lex < kwd_only >()) { media_query->is_restricted(true); lex < css_comments >(false); }
if (lex < identifier_schema >()) media_query->media_type(parse_identifier_schema());
else if (lex < identifier >()) media_query->media_type(parse_interpolated_chunk(lexed));
else media_query->append(parse_media_expression());
while (lex_css < kwd_and >()) media_query->append(parse_media_expression());
if (lex < identifier_schema >()) {
String_Schema* schema = SASS_MEMORY_NEW(String_Schema, pstate);
schema->append(media_query->media_type());
schema->append(SASS_MEMORY_NEW(String_Constant, pstate, " "));
schema->append(parse_identifier_schema());
media_query->media_type(schema);
}
while (lex_css < kwd_and >()) media_query->append(parse_media_expression());
media_query->update_pstate(pstate);
return media_query;
} | 0 |
linux | 7926aff5c57b577ab0f43364ff0c59d968f6a414 | NOT_APPLICABLE | NOT_APPLICABLE | static void write_bulk_callback(struct urb *urb)
{
rtl8150_t *dev;
int status = urb->status;
dev = urb->context;
if (!dev)
return;
dev_kfree_skb_irq(dev->tx_skb);
if (!netif_device_present(dev->netdev))
return;
if (status)
dev_info(&urb->dev->dev, "%s: Tx status %d\n",
dev->netdev->name, status);
netif_trans_update(dev->netdev);
netif_wake_queue(dev->netdev);
}
| 0 |
linux | d1f82808877bb10d3deee7cf3374a4eb3fb582db | NOT_APPLICABLE | NOT_APPLICABLE |
static void __io_queue_sqe(struct io_kiocb *req)
{
struct io_kiocb *linked_timeout = io_prep_linked_timeout(req);
int ret;
ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
/*
* We async punt it if the file wasn't marked NOWAIT, or if the file
* doesn't support non-blocking read/write attempts
*/
if (likely(!ret)) {
/* drop submission reference */
if (req->flags & REQ_F_COMPLETE_INLINE) {
struct io_ring_ctx *ctx = req->ctx;
struct io_comp_state *cs = &ctx->submit_state.comp;
cs->reqs[cs->nr++] = req;
if (cs->nr == ARRAY_SIZE(cs->reqs))
io_submit_flush_completions(cs, ctx);
} else {
io_put_req(req);
}
} else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
if (!io_arm_poll_handler(req)) {
/*
* Queued up for async execution, worker will release
* submit reference when the iocb is actually submitted.
*/
io_queue_async_work(req);
}
} else {
io_req_complete_failed(req, ret);
}
if (linked_timeout)
io_queue_linked_timeout(linked_timeout); | 0 |
linux | fdf5af0daf8019cec2396cdef8fb042d80fe71fa | NOT_APPLICABLE | NOT_APPLICABLE | static void tcp_cwnd_down(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
int decr = tp->snd_cwnd_cnt + 1;
if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
(tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
tp->snd_cwnd_cnt = decr & 1;
decr >>= 1;
if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
tp->snd_cwnd -= decr;
tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
}
| 0 |
neovim | 4fad66fbe637818b6b3d6bc5d21923ba72795040 | NOT_APPLICABLE | NOT_APPLICABLE | void save_file_ff(buf_T *buf)
{
buf->b_start_ffc = *buf->b_p_ff;
buf->b_start_eol = buf->b_p_eol;
buf->b_start_bomb = buf->b_p_bomb;
/* Only use free/alloc when necessary, they take time. */
if (buf->b_start_fenc == NULL
|| STRCMP(buf->b_start_fenc, buf->b_p_fenc) != 0) {
xfree(buf->b_start_fenc);
buf->b_start_fenc = vim_strsave(buf->b_p_fenc);
}
} | 0 |
cyrus-imapd | 53c4137bd924b954432c6c59da7572c4c5ffa901 | NOT_APPLICABLE | NOT_APPLICABLE | static void cmd_listrights(char *tag, char *name, char *identifier)
{
int r, rights;
mbentry_t *mbentry = NULL;
struct auth_state *authstate;
const char *canon_identifier;
int implicit;
char rightsdesc[100], optional[33];
char *intname = mboxname_from_external(name, &imapd_namespace, imapd_userid);
r = mlookup(tag, name, intname, &mbentry);
if (r == IMAP_MAILBOX_MOVED) return;
if (!r) {
rights = cyrus_acl_myrights(imapd_authstate, mbentry->acl);
if (!rights && !imapd_userisadmin &&
!mboxname_userownsmailbox(imapd_userid, intname)) {
r = IMAP_MAILBOX_NONEXISTENT;
}
}
mboxlist_entry_free(&mbentry);
imapd_check(NULL, 0);
if (r) {
prot_printf(imapd_out, "%s NO %s\r\n", tag, error_message(r));
return;
}
authstate = auth_newstate(identifier);
if (global_authisa(authstate, IMAPOPT_ADMINS))
canon_identifier = identifier; /* don't canonify global admins */
else
canon_identifier = canonify_userid(identifier, imapd_userid, NULL);
auth_freestate(authstate);
if (!canon_identifier) {
implicit = 0;
}
else if (mboxname_userownsmailbox(canon_identifier, intname)) {
/* identifier's personal mailbox */
implicit = config_implicitrights;
}
else if (mboxname_isusermailbox(intname, 1)) {
/* anyone can post to an INBOX */
implicit = ACL_POST;
}
else {
implicit = 0;
}
/* calculate optional rights */
cyrus_acl_masktostr(implicit ^ (canon_identifier ? ACL_FULL : 0),
optional);
/* build the rights string */
if (implicit) {
cyrus_acl_masktostr(implicit, rightsdesc);
}
else {
strcpy(rightsdesc, "\"\"");
}
if (*optional) {
int i, n = strlen(optional);
char *p = rightsdesc + strlen(rightsdesc);
for (i = 0; i < n; i++) {
*p++ = ' ';
*p++ = optional[i];
}
*p = '\0';
}
prot_printf(imapd_out, "* LISTRIGHTS ");
prot_printastring(imapd_out, name);
(void)prot_putc(' ', imapd_out);
prot_printastring(imapd_out, identifier);
prot_printf(imapd_out, " %s", rightsdesc);
prot_printf(imapd_out, "\r\n%s OK %s\r\n", tag,
error_message(IMAP_OK_COMPLETED));
free(intname);
}
| 0 |
php-src | 77f619d48259383628c3ec4654b1ad578e9eb40e | NOT_APPLICABLE | NOT_APPLICABLE | BGD_DECLARE(void) gdImageFilledEllipse (gdImagePtr im, int mx, int my, int w, int h, int c)
{
int x=0,mx1=0,mx2=0,my1=0,my2=0;
long aq,bq,dx,dy,r,rx,ry,a,b;
int i;
int old_y2;
a=w>>1;
b=h>>1;
for (x = mx-a; x <= mx+a; x++) {
gdImageSetPixel(im, x, my, c);
}
mx1 = mx-a;
my1 = my;
mx2 = mx+a;
my2 = my;
aq = a * a;
bq = b * b;
dx = aq << 1;
dy = bq << 1;
r = a * bq;
rx = r << 1;
ry = 0;
x = a;
old_y2=-2;
while (x > 0) {
if (r > 0) {
my1++;
my2--;
ry +=dx;
r -=ry;
}
if (r <= 0) {
x--;
mx1++;
mx2--;
rx -=dy;
r +=rx;
}
if(old_y2!=my2) {
for(i=mx1; i<=mx2; i++) {
gdImageSetPixel(im,i,my1,c);
}
}
if(old_y2!=my2) {
for(i=mx1; i<=mx2; i++) {
gdImageSetPixel(im,i,my2,c);
}
}
old_y2 = my2;
}
}
| 0 |
Chrome | f85a87ec670ad0fce9d98d90c9a705b72a288154 | NOT_APPLICABLE | NOT_APPLICABLE | static void reflectedTreatNullAsNullStringCustomURLAttrAttributeGetterCallback(v8::Local<v8::String>, const v8::PropertyCallbackInfo<v8::Value>& info)
{
TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMGetter");
TestObjectV8Internal::reflectedTreatNullAsNullStringCustomURLAttrAttributeGetter(info);
TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution");
}
| 0 |
linux | 427215d85e8d1476da1a86b8d67aceb485eb3631 | NOT_APPLICABLE | NOT_APPLICABLE | static unsigned int attr_flags_to_mnt_flags(u64 attr_flags)
{
unsigned int mnt_flags = 0;
if (attr_flags & MOUNT_ATTR_RDONLY)
mnt_flags |= MNT_READONLY;
if (attr_flags & MOUNT_ATTR_NOSUID)
mnt_flags |= MNT_NOSUID;
if (attr_flags & MOUNT_ATTR_NODEV)
mnt_flags |= MNT_NODEV;
if (attr_flags & MOUNT_ATTR_NOEXEC)
mnt_flags |= MNT_NOEXEC;
if (attr_flags & MOUNT_ATTR_NODIRATIME)
mnt_flags |= MNT_NODIRATIME;
if (attr_flags & MOUNT_ATTR_NOSYMFOLLOW)
mnt_flags |= MNT_NOSYMFOLLOW;
return mnt_flags;
} | 0 |
Android | 65c49d5b382de4085ee5668732bcb0f6ecaf7148 | NOT_APPLICABLE | NOT_APPLICABLE | const CuePoint::TrackPosition* CuePoint::Find(const Track* pTrack) const {
assert(pTrack);
const long long n = pTrack->GetNumber();
const TrackPosition* i = m_track_positions;
const TrackPosition* const j = i + m_track_positions_count;
while (i != j) {
const TrackPosition& p = *i++;
if (p.m_track == n)
return &p;
}
return NULL; // no matching track number found
}
| 0 |
linux | 550fd08c2cebad61c548def135f67aba284c6162 | NOT_APPLICABLE | NOT_APPLICABLE | static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq)
{
u32 seq,index;
if (mcast) {
if (test_bit(FLAG_UPDATE_MULTI, &ai->flags)) {
clear_bit (FLAG_UPDATE_MULTI, &ai->flags);
context->window = (micSeq > 33) ? micSeq : 33;
context->rx = 0; // Reset rx
}
} else if (test_bit(FLAG_UPDATE_UNI, &ai->flags)) {
clear_bit (FLAG_UPDATE_UNI, &ai->flags);
context->window = (micSeq > 33) ? micSeq : 33; // Move window
context->rx = 0; // Reset rx
}
seq = micSeq - (context->window - 33);
if ((s32)seq < 0)
return ERROR;
if ( seq > 64 ) {
MoveWindow(context,micSeq);
return SUCCESS;
}
seq >>= 1; //divide by 2 because we only have odd numbers
index = 1 << seq; //Get an index number
if (!(context->rx & index)) {
context->rx |= index;
MoveWindow(context,micSeq);
return SUCCESS;
}
return ERROR;
}
| 0 |
linux | 57e68e9cd65b4b8eb4045a1e0d0746458502554c | NOT_APPLICABLE | NOT_APPLICABLE | SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
int ret;
len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
start &= PAGE_MASK;
down_write(&current->mm->mmap_sem);
ret = do_mlock(start, len, 0);
up_write(&current->mm->mmap_sem);
return ret;
}
| 0 |
menu-cache | 56f66684592abf257c4004e6e1fff041c64a12ce | NOT_APPLICABLE | NOT_APPLICABLE | static void terminate(int sig)
{
/* #ifndef HAVE_ABSTRACT_SOCKETS */
unlink(socket_file);
exit(0);
/* #endif */
} | 0 |
Onigmo | ce13b17b955e0b6dfc6606b1fdbd4755590b360b | NOT_APPLICABLE | NOT_APPLICABLE | euckr_is_allowed_reverse_match(const UChar* s, const UChar* end ARG_UNUSED, OnigEncoding enc ARG_UNUSED)
{
const UChar c = *s;
if (c <= 0x7e) return TRUE;
else return FALSE;
} | 0 |
ChakraCore | 402f3d967c0a905ec5b9ca9c240783d3f2c15724 | NOT_APPLICABLE | NOT_APPLICABLE | ParseNode* Parser::GetFunctionBlock()
{
Assert(m_currentBlockInfo != nullptr);
return m_currentBlockInfo->pBlockInfoFunction->pnodeBlock;
} | 0 |
src | 142cfc82b932bc211218fbd7bdda8c7ce83f19df | NOT_APPLICABLE | NOT_APPLICABLE | server_writeheader_http(struct client *clt, struct kv *hdr, void *arg)
{
char *ptr;
const char *key;
if (hdr->kv_flags & KV_FLAG_INVALID)
return (0);
/* The key might have been updated in the parent */
if (hdr->kv_parent != NULL && hdr->kv_parent->kv_key != NULL)
key = hdr->kv_parent->kv_key;
else
key = hdr->kv_key;
ptr = hdr->kv_value;
if (server_bufferevent_print(clt, key) == -1 ||
(ptr != NULL &&
(server_bufferevent_print(clt, ": ") == -1 ||
server_bufferevent_print(clt, ptr) == -1 ||
server_bufferevent_print(clt, "\r\n") == -1)))
return (-1);
DPRINTF("%s: %s: %s", __func__, key,
hdr->kv_value == NULL ? "" : hdr->kv_value);
return (0);
}
| 0 |
linux | 0305cd5f7fca85dae392b9ba85b116896eb7c1c7 | NOT_APPLICABLE | NOT_APPLICABLE | static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
void *ctx)
{
struct btrfs_file_extent_item *extent;
struct btrfs_fs_info *fs_info;
struct old_sa_defrag_extent *old = ctx;
struct new_sa_defrag_extent *new = old->new;
struct btrfs_path *path = new->path;
struct btrfs_key key;
struct btrfs_root *root;
struct sa_defrag_extent_backref *backref;
struct extent_buffer *leaf;
struct inode *inode = new->inode;
int slot;
int ret;
u64 extent_offset;
u64 num_bytes;
if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
inum == btrfs_ino(inode))
return 0;
key.objectid = root_id;
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;
fs_info = BTRFS_I(inode)->root->fs_info;
root = btrfs_read_fs_root_no_name(fs_info, &key);
if (IS_ERR(root)) {
if (PTR_ERR(root) == -ENOENT)
return 0;
WARN_ON(1);
pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
inum, offset, root_id);
return PTR_ERR(root);
}
key.objectid = inum;
key.type = BTRFS_EXTENT_DATA_KEY;
if (offset > (u64)-1 << 32)
key.offset = 0;
else
key.offset = offset;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (WARN_ON(ret < 0))
return ret;
ret = 0;
while (1) {
cond_resched();
leaf = path->nodes[0];
slot = path->slots[0];
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(root, path);
if (ret < 0) {
goto out;
} else if (ret > 0) {
ret = 0;
goto out;
}
continue;
}
path->slots[0]++;
btrfs_item_key_to_cpu(leaf, &key, slot);
if (key.objectid > inum)
goto out;
if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
continue;
extent = btrfs_item_ptr(leaf, slot,
struct btrfs_file_extent_item);
if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
continue;
/*
* 'offset' refers to the exact key.offset,
* NOT the 'offset' field in btrfs_extent_data_ref, ie.
* (key.offset - extent_offset).
*/
if (key.offset != offset)
continue;
extent_offset = btrfs_file_extent_offset(leaf, extent);
num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
if (extent_offset >= old->extent_offset + old->offset +
old->len || extent_offset + num_bytes <=
old->extent_offset + old->offset)
continue;
break;
}
backref = kmalloc(sizeof(*backref), GFP_NOFS);
if (!backref) {
ret = -ENOENT;
goto out;
}
backref->root_id = root_id;
backref->inum = inum;
backref->file_pos = offset;
backref->num_bytes = num_bytes;
backref->extent_offset = extent_offset;
backref->generation = btrfs_file_extent_generation(leaf, extent);
backref->old = old;
backref_insert(&new->root, backref);
old->count++;
out:
btrfs_release_path(path);
WARN_ON(ret);
return ret;
}
| 0 |
linux | 4943ba16bbc2db05115707b3ff7b4874e9e3c560 | NOT_APPLICABLE | NOT_APPLICABLE | static int init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct priv *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
if (crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) {
*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
crypto_free_cipher(cipher);
return -EINVAL;
}
ctx->child = cipher;
return 0;
}
| 0 |
linux | f85daf0e725358be78dfd208dea5fd665d8cb901 | NOT_APPLICABLE | NOT_APPLICABLE | static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
const struct flowi *fl,
u16 family, u8 dir,
struct xfrm_flo *xflo, u32 if_id)
{
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int num_pols = 0, num_xfrms = 0, err;
struct xfrm_dst *xdst;
/* Resolve policies to use if we couldn't get them from
* previous cache entry */
num_pols = 1;
pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
err = xfrm_expand_policies(fl, family, pols,
&num_pols, &num_xfrms);
if (err < 0)
goto inc_error;
if (num_pols == 0)
return NULL;
if (num_xfrms <= 0)
goto make_dummy_bundle;
xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
xflo->dst_orig);
if (IS_ERR(xdst)) {
err = PTR_ERR(xdst);
if (err == -EREMOTE) {
xfrm_pols_put(pols, num_pols);
return NULL;
}
if (err != -EAGAIN)
goto error;
goto make_dummy_bundle;
} else if (xdst == NULL) {
num_xfrms = 0;
goto make_dummy_bundle;
}
return xdst;
make_dummy_bundle:
/* We found policies, but there's no bundles to instantiate:
* either because the policy blocks, has no transformations or
* we could not build template (no xfrm_states).*/
xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
if (IS_ERR(xdst)) {
xfrm_pols_put(pols, num_pols);
return ERR_CAST(xdst);
}
xdst->num_pols = num_pols;
xdst->num_xfrms = num_xfrms;
memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
return xdst;
inc_error:
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
xfrm_pols_put(pols, num_pols);
return ERR_PTR(err);
} | 0 |
Chrome | da9a32b9e282c1653bb6b5c1b8c89a1970905f21 | NOT_APPLICABLE | NOT_APPLICABLE | void RenderFrameHostImpl::Cut() {
Send(new InputMsg_Cut(routing_id_));
RecordAction(base::UserMetricsAction("Cut"));
}
| 0 |
vim | 54e5fed6d27b747ff152cdb6edfb72ff60e70939 | NOT_APPLICABLE | NOT_APPLICABLE | use_midword(slang_T *lp, win_T *wp)
{
char_u *p;
if (lp->sl_midword == NULL) // there aren't any
return;
for (p = lp->sl_midword; *p != NUL; )
if (has_mbyte)
{
int c, l, n;
char_u *bp;
c = mb_ptr2char(p);
l = (*mb_ptr2len)(p);
if (c < 256 && l <= 2)
wp->w_s->b_spell_ismw[c] = TRUE;
else if (wp->w_s->b_spell_ismw_mb == NULL)
// First multi-byte char in "b_spell_ismw_mb".
wp->w_s->b_spell_ismw_mb = vim_strnsave(p, l);
else
{
// Append multi-byte chars to "b_spell_ismw_mb".
n = (int)STRLEN(wp->w_s->b_spell_ismw_mb);
bp = vim_strnsave(wp->w_s->b_spell_ismw_mb, n + l);
if (bp != NULL)
{
vim_free(wp->w_s->b_spell_ismw_mb);
wp->w_s->b_spell_ismw_mb = bp;
vim_strncpy(bp + n, p, l);
}
}
p += l;
}
else
wp->w_s->b_spell_ismw[*p++] = TRUE;
} | 0 |
Chrome | fd506b0ac6c7846ae45b5034044fe85c28ee68ac | NOT_APPLICABLE | NOT_APPLICABLE | FrameLoadType FrameLoader::DetermineFrameLoadType(
const FrameLoadRequest& request) {
if (frame_->Tree().Parent() &&
!state_machine_.CommittedFirstRealDocumentLoad())
return kFrameLoadTypeInitialInChildFrame;
if (!frame_->Tree().Parent() && !Client()->BackForwardLength()) {
if (Opener() && request.GetResourceRequest().Url().IsEmpty())
return kFrameLoadTypeReplaceCurrentItem;
return kFrameLoadTypeStandard;
}
if (request.GetResourceRequest().GetCacheMode() ==
mojom::FetchCacheMode::kValidateCache)
return kFrameLoadTypeReload;
if (request.GetResourceRequest().GetCacheMode() ==
mojom::FetchCacheMode::kBypassCache)
return kFrameLoadTypeReloadBypassingCache;
if (request.ReplacesCurrentItem() ||
(!state_machine_.CommittedMultipleRealLoads() &&
DeprecatedEqualIgnoringCase(frame_->GetDocument()->Url(), BlankURL())))
return kFrameLoadTypeReplaceCurrentItem;
if (request.GetResourceRequest().Url() == document_loader_->UrlForHistory()) {
if (request.GetResourceRequest().HttpMethod() == HTTPNames::POST)
return kFrameLoadTypeStandard;
if (!request.OriginDocument())
return kFrameLoadTypeReload;
return kFrameLoadTypeReplaceCurrentItem;
}
if (request.GetSubstituteData().FailingURL() ==
document_loader_->UrlForHistory() &&
document_loader_->LoadType() == kFrameLoadTypeReload)
return kFrameLoadTypeReload;
if (request.GetResourceRequest().Url().IsEmpty() &&
request.GetSubstituteData().FailingURL().IsEmpty()) {
return kFrameLoadTypeReplaceCurrentItem;
}
if (request.OriginDocument() &&
!request.OriginDocument()->CanCreateHistoryEntry())
return kFrameLoadTypeReplaceCurrentItem;
return kFrameLoadTypeStandard;
}
| 0 |
Chrome | 7c3bb2970fd0890df611b1d8b345b60b1978c2d7 | NOT_APPLICABLE | NOT_APPLICABLE | void PlatformFontSkia::InitFromDetails(sk_sp<SkTypeface> typeface,
const std::string& font_family,
int font_size_pixels,
int style,
Font::Weight weight,
const FontRenderParams& render_params) {
TRACE_EVENT0("fonts", "PlatformFontSkia::InitFromDetails");
DCHECK_GT(font_size_pixels, 0);
font_family_ = font_family;
bool success = true;
typeface_ = typeface ? std::move(typeface)
: CreateSkTypeface(style & Font::ITALIC, weight,
&font_family_, &success);
if (!success) {
LOG(ERROR) << "Could not find any font: " << font_family << ", "
<< kFallbackFontFamilyName << ". Falling back to the default";
InitFromPlatformFont(g_default_font.Get().get());
return;
}
font_size_pixels_ = font_size_pixels;
style_ = style;
weight_ = weight;
device_scale_factor_ = GetFontRenderParamsDeviceScaleFactor();
font_render_params_ = render_params;
}
| 0 |
linux | 12ca6ad2e3a896256f086497a7c7406a547ee373 | NOT_APPLICABLE | NOT_APPLICABLE | void perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx;
ctx = perf_event_ctx_lock(event);
_perf_event_disable(event);
perf_event_ctx_unlock(event, ctx);
}
| 0 |
tip | dfb4357da6ddbdf57d583ba64361c9d792b0e0b1 | NOT_APPLICABLE | NOT_APPLICABLE | static void *move_iter(struct timer_list_iter *iter, loff_t offset)
{
for (; offset; offset--) {
iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
if (iter->cpu >= nr_cpu_ids) {
#ifdef CONFIG_GENERIC_CLOCKEVENTS
if (!iter->second_pass) {
iter->cpu = -1;
iter->second_pass = true;
} else
return NULL;
#else
return NULL;
#endif
}
}
return iter;
} | 0 |
Chrome | 98095c718d7580b5d6715e5bfd8698234ecb4470 | NOT_APPLICABLE | NOT_APPLICABLE | WebGLRenderingContextBase::WebGLRenderingContextBase(
CanvasRenderingContextHost* host,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
std::unique_ptr<WebGraphicsContext3DProvider> context_provider,
bool using_gpu_compositing,
const CanvasContextCreationAttributesCore& requested_attributes,
unsigned version)
: CanvasRenderingContext(host, requested_attributes),
context_group_(new WebGLContextGroup()),
is_hidden_(false),
context_lost_mode_(kNotLostContext),
auto_recovery_method_(kManual),
dispatch_context_lost_event_timer_(
task_runner,
this,
&WebGLRenderingContextBase::DispatchContextLostEvent),
restore_allowed_(false),
restore_timer_(task_runner,
this,
&WebGLRenderingContextBase::MaybeRestoreContext),
task_runner_(task_runner),
generated_image_cache_(4),
synthesized_errors_to_console_(true),
num_gl_errors_to_console_allowed_(kMaxGLErrorsAllowedToConsole),
one_plus_max_non_default_texture_unit_(0),
is_web_gl2_formats_types_added_(false),
is_web_gl2_tex_image_source_formats_types_added_(false),
is_web_gl2_internal_formats_copy_tex_image_added_(false),
is_oes_texture_float_formats_types_added_(false),
is_oes_texture_half_float_formats_types_added_(false),
is_web_gl_depth_texture_formats_types_added_(false),
is_ext_srgb_formats_types_added_(false),
is_ext_color_buffer_float_formats_added_(false),
version_(version) {
DCHECK(context_provider);
Host()->RegisterContextToDispatch(this);
compatible_xr_device_ =
static_cast<XRDevice*>(requested_attributes.compatible_xr_device.Get());
context_group_->AddContext(this);
max_viewport_dims_[0] = max_viewport_dims_[1] = 0;
context_provider->ContextGL()->GetIntegerv(GL_MAX_VIEWPORT_DIMS,
max_viewport_dims_);
scoped_refptr<DrawingBuffer> buffer;
buffer =
CreateDrawingBuffer(std::move(context_provider), using_gpu_compositing);
if (!buffer) {
context_lost_mode_ = kSyntheticLostContext;
return;
}
drawing_buffer_ = std::move(buffer);
GetDrawingBuffer()->Bind(GL_FRAMEBUFFER);
SetupFlags();
String disabled_webgl_extensions(GetDrawingBuffer()
->ContextProvider()
->GetGpuFeatureInfo()
.disabled_webgl_extensions.c_str());
Vector<String> disabled_extension_list;
disabled_webgl_extensions.Split(' ', disabled_extension_list);
for (const auto& entry : disabled_extension_list) {
disabled_extensions_.insert(entry);
}
#define ADD_VALUES_TO_SET(set, values) \
for (size_t i = 0; i < arraysize(values); ++i) { \
set.insert(values[i]); \
}
ADD_VALUES_TO_SET(supported_internal_formats_, kSupportedFormatsES2);
ADD_VALUES_TO_SET(supported_tex_image_source_internal_formats_,
kSupportedFormatsES2);
ADD_VALUES_TO_SET(supported_internal_formats_copy_tex_image_,
kSupportedFormatsES2);
ADD_VALUES_TO_SET(supported_formats_, kSupportedFormatsES2);
ADD_VALUES_TO_SET(supported_tex_image_source_formats_, kSupportedFormatsES2);
ADD_VALUES_TO_SET(supported_types_, kSupportedTypesES2);
ADD_VALUES_TO_SET(supported_tex_image_source_types_, kSupportedTypesES2);
}
| 0 |
Chrome | 5bb223676defeba9c44a5ce42460c86e24561e73 | NOT_APPLICABLE | NOT_APPLICABLE | void ChromeContentBrowserClient::ExposeInterfacesToRenderer(
service_manager::BinderRegistry* registry,
blink::AssociatedInterfaceRegistry* associated_registry,
content::RenderProcessHost* render_process_host) {
associated_registry->AddInterface(
base::Bind(&CacheStatsRecorder::Create, render_process_host->GetID()));
scoped_refptr<base::SingleThreadTaskRunner> ui_task_runner =
base::CreateSingleThreadTaskRunnerWithTraits(
{content::BrowserThread::UI});
registry->AddInterface(
base::Bind(&rappor::RapporRecorderImpl::Create,
g_browser_process->rappor_service()),
ui_task_runner);
registry->AddInterface(
base::BindRepeating(&metrics::CallStackProfileCollector::Create));
if (NetBenchmarking::CheckBenchmarkingEnabled()) {
Profile* profile =
Profile::FromBrowserContext(render_process_host->GetBrowserContext());
auto* loading_predictor =
predictors::LoadingPredictorFactory::GetForProfile(profile);
registry->AddInterface(
base::BindRepeating(
&NetBenchmarking::Create,
loading_predictor ? loading_predictor->GetWeakPtr() : nullptr,
render_process_host->GetID()),
ui_task_runner);
}
#if defined(SAFE_BROWSING_DB_LOCAL) || defined(SAFE_BROWSING_DB_REMOTE)
if (safe_browsing_service_) {
content::ResourceContext* resource_context =
render_process_host->GetBrowserContext()->GetResourceContext();
registry->AddInterface(
base::Bind(
&safe_browsing::MojoSafeBrowsingImpl::MaybeCreate,
render_process_host->GetID(), resource_context,
base::Bind(
&ChromeContentBrowserClient::GetSafeBrowsingUrlCheckerDelegate,
base::Unretained(this), resource_context)),
base::CreateSingleThreadTaskRunnerWithTraits({BrowserThread::IO}));
}
#endif // defined(SAFE_BROWSING_DB_LOCAL) || defined(SAFE_BROWSING_DB_REMOTE)
if (data_reduction_proxy::params::IsEnabledWithNetworkService()) {
registry->AddInterface(base::BindRepeating(
&AddDataReductionProxyBinding,
render_process_host->GetBrowserContext()->GetResourceContext()));
}
#if defined(OS_WIN)
auto get_process = base::BindRepeating(
[](content::RenderProcessHost* host) -> base::Process {
return host->GetProcess().Duplicate();
},
base::Unretained(render_process_host));
registry->AddInterface(
base::BindRepeating(&ModuleEventSinkImpl::Create, std::move(get_process),
content::PROCESS_TYPE_RENDERER,
base::Unretained(ModuleDatabase::GetInstance())),
ui_task_runner);
#endif
#if defined(OS_ANDROID)
Profile* profile =
Profile::FromBrowserContext(render_process_host->GetBrowserContext());
registry->AddInterface(
base::BindRepeating(&android::AvailableOfflineContentProvider::Create,
profile),
base::CreateSingleThreadTaskRunnerWithTraits({BrowserThread::UI}));
#endif
for (auto* ep : extra_parts_) {
ep->ExposeInterfacesToRenderer(registry, associated_registry,
render_process_host);
}
}
| 0 |
net-snmp | 5f881d3bf24599b90d67a45cae7a3eb099cd71c9 | NOT_APPLICABLE | NOT_APPLICABLE | int netsnmp_query_getnext(netsnmp_variable_list *list,
netsnmp_session *session){
return _query( list, SNMP_MSG_GETNEXT, session );
} | 0 |
Android | aeea52da00d210587fb3ed895de3d5f2e0264c88 | NOT_APPLICABLE | NOT_APPLICABLE | int Effect_setEnabled(EffectContext *pContext, bool enabled)
{
ALOGV("\tEffect_setEnabled() type %d, enabled %d", pContext->EffectType, enabled);
if (enabled) {
bool tempDisabled = false;
switch (pContext->EffectType) {
case LVM_BASS_BOOST:
if (pContext->pBundledContext->bBassEnabled == LVM_TRUE) {
ALOGV("\tEffect_setEnabled() LVM_BASS_BOOST is already enabled");
return -EINVAL;
}
if(pContext->pBundledContext->SamplesToExitCountBb <= 0){
pContext->pBundledContext->NumberEffectsEnabled++;
}
pContext->pBundledContext->SamplesToExitCountBb =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
pContext->pBundledContext->bBassEnabled = LVM_TRUE;
tempDisabled = pContext->pBundledContext->bBassTempDisabled;
break;
case LVM_EQUALIZER:
if (pContext->pBundledContext->bEqualizerEnabled == LVM_TRUE) {
ALOGV("\tEffect_setEnabled() LVM_EQUALIZER is already enabled");
return -EINVAL;
}
if(pContext->pBundledContext->SamplesToExitCountEq <= 0){
pContext->pBundledContext->NumberEffectsEnabled++;
}
pContext->pBundledContext->SamplesToExitCountEq =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
pContext->pBundledContext->bEqualizerEnabled = LVM_TRUE;
break;
case LVM_VIRTUALIZER:
if (pContext->pBundledContext->bVirtualizerEnabled == LVM_TRUE) {
ALOGV("\tEffect_setEnabled() LVM_VIRTUALIZER is already enabled");
return -EINVAL;
}
if(pContext->pBundledContext->SamplesToExitCountVirt <= 0){
pContext->pBundledContext->NumberEffectsEnabled++;
}
pContext->pBundledContext->SamplesToExitCountVirt =
(LVM_INT32)(pContext->pBundledContext->SamplesPerSecond*0.1);
pContext->pBundledContext->bVirtualizerEnabled = LVM_TRUE;
tempDisabled = pContext->pBundledContext->bVirtualizerTempDisabled;
break;
case LVM_VOLUME:
if (pContext->pBundledContext->bVolumeEnabled == LVM_TRUE) {
ALOGV("\tEffect_setEnabled() LVM_VOLUME is already enabled");
return -EINVAL;
}
pContext->pBundledContext->NumberEffectsEnabled++;
pContext->pBundledContext->bVolumeEnabled = LVM_TRUE;
break;
default:
ALOGV("\tEffect_setEnabled() invalid effect type");
return -EINVAL;
}
if (!tempDisabled) {
LvmEffect_enable(pContext);
}
} else {
switch (pContext->EffectType) {
case LVM_BASS_BOOST:
if (pContext->pBundledContext->bBassEnabled == LVM_FALSE) {
ALOGV("\tEffect_setEnabled() LVM_BASS_BOOST is already disabled");
return -EINVAL;
}
pContext->pBundledContext->bBassEnabled = LVM_FALSE;
break;
case LVM_EQUALIZER:
if (pContext->pBundledContext->bEqualizerEnabled == LVM_FALSE) {
ALOGV("\tEffect_setEnabled() LVM_EQUALIZER is already disabled");
return -EINVAL;
}
pContext->pBundledContext->bEqualizerEnabled = LVM_FALSE;
break;
case LVM_VIRTUALIZER:
if (pContext->pBundledContext->bVirtualizerEnabled == LVM_FALSE) {
ALOGV("\tEffect_setEnabled() LVM_VIRTUALIZER is already disabled");
return -EINVAL;
}
pContext->pBundledContext->bVirtualizerEnabled = LVM_FALSE;
break;
case LVM_VOLUME:
if (pContext->pBundledContext->bVolumeEnabled == LVM_FALSE) {
ALOGV("\tEffect_setEnabled() LVM_VOLUME is already disabled");
return -EINVAL;
}
pContext->pBundledContext->bVolumeEnabled = LVM_FALSE;
break;
default:
ALOGV("\tEffect_setEnabled() invalid effect type");
return -EINVAL;
}
LvmEffect_disable(pContext);
}
return 0;
}
| 0 |
linux | 84ac7260236a49c79eede91617700174c2c19b0c | NOT_APPLICABLE | NOT_APPLICABLE | static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct packet_sock *po = pkt_sk(sock->sk);
if (level != SOL_PACKET)
return -ENOPROTOOPT;
if (optname == PACKET_FANOUT_DATA &&
po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
optval = (char __user *)get_compat_bpf_fprog(optval);
if (!optval)
return -EFAULT;
optlen = sizeof(struct sock_fprog);
}
return packet_setsockopt(sock, level, optname, optval, optlen);
}
| 0 |
Chrome | b276d0570cc816bfe25b431f2ee9bc265a6ad478 | NOT_APPLICABLE | NOT_APPLICABLE | int32_t TestURLLoader::Open(const pp::URLRequestInfo& request,
bool trusted,
std::string* response_body) {
pp::URLLoader loader(instance_);
if (trusted)
url_loader_trusted_interface_->GrantUniversalAccess(loader.pp_resource());
{
TestCompletionCallback open_callback(instance_->pp_instance(),
callback_type());
open_callback.WaitForResult(
loader.Open(request, open_callback.GetCallback()));
if (open_callback.result() != PP_OK)
return open_callback.result();
}
int32_t bytes_read = 0;
do {
char buffer[1024];
TestCompletionCallback read_callback(instance_->pp_instance(),
callback_type());
read_callback.WaitForResult(loader.ReadResponseBody(
&buffer, sizeof(buffer), read_callback.GetCallback()));
bytes_read = read_callback.result();
if (bytes_read < 0)
return bytes_read;
if (response_body)
response_body->append(std::string(buffer, bytes_read));
} while (bytes_read > 0);
return PP_OK;
}
| 0 |
radare2 | e9ce0d64faf19fa4e9c260250fbdf25e3c11e152 | NOT_APPLICABLE | NOT_APPLICABLE | R_API RBinSymbol *r_bin_java_create_new_symbol_from_cp_idx(ut32 cp_idx, ut64 baddr) {
RBinSymbol *sym = NULL;
RBinJavaCPTypeObj *obj = r_bin_java_get_item_from_bin_cp_list (
R_BIN_JAVA_GLOBAL_BIN, cp_idx);
if (obj) {
switch (obj->tag) {
case R_BIN_JAVA_CP_METHODREF:
case R_BIN_JAVA_CP_FIELDREF:
case R_BIN_JAVA_CP_INTERFACEMETHOD_REF:
sym = r_bin_java_create_new_symbol_from_ref (obj, baddr);
break;
case R_BIN_JAVA_CP_INVOKEDYNAMIC:
sym = r_bin_java_create_new_symbol_from_invoke_dynamic (obj, baddr);
break;
default:
break;
}
}
return sym;
} | 0 |
linux | d974baa398f34393db76be45f7d4d04fbdbb4a0a | NOT_APPLICABLE | NOT_APPLICABLE | static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
if (!vmx->host_state.loaded)
return;
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu))
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
load_gs_index(vmx->host_state.gs_sel);
#else
loadsegment(gs, vmx->host_state.gs_sel);
#endif
}
if (vmx->host_state.fs_reload_needed)
loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
loadsegment(ds, vmx->host_state.ds_sel);
loadsegment(es, vmx->host_state.es_sel);
}
#endif
reload_tss();
#ifdef CONFIG_X86_64
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
if (vmx->host_state.msr_host_bndcfgs)
wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
/*
* If the FPU is not active (through the host task or
* the guest vcpu), then restore the cr0.TS bit.
*/
if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
stts();
load_gdt(this_cpu_ptr(&host_gdt));
}
| 0 |
exiv2 | c72d16f4c402a8acc2dfe06fe3d58bf6cf99069e | NOT_APPLICABLE | NOT_APPLICABLE | int RafImage::pixelHeight() const
{
Exiv2::ExifData::const_iterator heightIter = exifData_.findKey(Exiv2::ExifKey("Exif.Photo.PixelYDimension"));
if (heightIter != exifData_.end() && heightIter->count() > 0) {
return heightIter->toLong();
}
return 0;
} | 0 |
linux | 23fcb3340d033d9f081e21e6c12c2db7eaa541d3 | NOT_APPLICABLE | NOT_APPLICABLE | xfs_inode_buf_readahead_verify(
struct xfs_buf *bp)
{
xfs_inode_buf_verify(bp, true);
}
| 0 |
libarchive | 3014e198 | NOT_APPLICABLE | NOT_APPLICABLE | write_rr_ER(struct archive_write *a)
{
unsigned char *p;
p = wb_buffptr(a);
memset(p, 0, LOGICAL_BLOCK_SIZE);
p[0] = 'E';
p[1] = 'R';
p[3] = 0x01;
p[2] = RRIP_ER_SIZE;
p[4] = RRIP_ER_ID_SIZE;
p[5] = RRIP_ER_DSC_SIZE;
p[6] = RRIP_ER_SRC_SIZE;
p[7] = 0x01;
memcpy(&p[8], rrip_identifier, p[4]);
memcpy(&p[8+p[4]], rrip_descriptor, p[5]);
memcpy(&p[8+p[4]+p[5]], rrip_source, p[6]);
return (wb_consume(a, LOGICAL_BLOCK_SIZE));
}
| 0 |
gpac | bceb03fd2be95097a7b409ea59914f332fb6bc86 | NOT_APPLICABLE | NOT_APPLICABLE | GF_Box *trex_New()
{
ISOM_DECL_BOX_ALLOC(GF_TrackExtendsBox, GF_ISOM_BOX_TYPE_TREX);
return (GF_Box *)tmp;
}
| 0 |
linux | 942080643bce061c3dd9d5718d3b745dcb39a8bc | NOT_APPLICABLE | NOT_APPLICABLE | void ecryptfs_write_crypt_stat_flags(char *page_virt,
struct ecryptfs_crypt_stat *crypt_stat,
size_t *written)
{
u32 flags = 0;
int i;
for (i = 0; i < ((sizeof(ecryptfs_flag_map)
/ sizeof(struct ecryptfs_flag_map_elem))); i++)
if (crypt_stat->flags & ecryptfs_flag_map[i].local_flag)
flags |= ecryptfs_flag_map[i].file_flag;
/* Version is in top 8 bits of the 32-bit flag vector */
flags |= ((((u8)crypt_stat->file_version) << 24) & 0xFF000000);
put_unaligned_be32(flags, page_virt);
(*written) = 4;
}
| 0 |
linux | 93c647643b48f0131f02e45da3bd367d80443291 | NOT_APPLICABLE | NOT_APPLICABLE | static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name);
u32 dst_portid;
u32 dst_group;
struct sk_buff *skb;
int err;
struct scm_cookie scm;
u32 netlink_skb_flags = 0;
if (msg->msg_flags&MSG_OOB)
return -EOPNOTSUPP;
err = scm_send(sock, msg, &scm, true);
if (err < 0)
return err;
if (msg->msg_namelen) {
err = -EINVAL;
if (addr->nl_family != AF_NETLINK)
goto out;
dst_portid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
err = -EPERM;
if ((dst_group || dst_portid) &&
!netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
goto out;
netlink_skb_flags |= NETLINK_SKB_DST;
} else {
dst_portid = nlk->dst_portid;
dst_group = nlk->dst_group;
}
if (!nlk->bound) {
err = netlink_autobind(sock);
if (err)
goto out;
} else {
/* Ensure nlk is hashed and visible. */
smp_rmb();
}
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
err = -ENOBUFS;
skb = netlink_alloc_large_skb(len, dst_group);
if (skb == NULL)
goto out;
NETLINK_CB(skb).portid = nlk->portid;
NETLINK_CB(skb).dst_group = dst_group;
NETLINK_CB(skb).creds = scm.creds;
NETLINK_CB(skb).flags = netlink_skb_flags;
err = -EFAULT;
if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
kfree_skb(skb);
goto out;
}
err = security_netlink_send(sk, skb);
if (err) {
kfree_skb(skb);
goto out;
}
if (dst_group) {
refcount_inc(&skb->users);
netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
}
err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
out:
scm_destroy(&scm);
return err;
} | 0 |
linux | 5d26a105b5a73e5635eae0629b42fa0a90e07b7b | NOT_APPLICABLE | NOT_APPLICABLE | static int padlock_cra_init(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *fallback_tfm;
int err = -ENOMEM;
/* Allocate a fallback and abort if it failed. */
fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
fallback_driver_name);
err = PTR_ERR(fallback_tfm);
goto out;
}
ctx->fallback = fallback_tfm;
hash->descsize += crypto_shash_descsize(fallback_tfm);
return 0;
out:
return err;
}
| 0 |
linux | 0ddcff49b672239dda94d70d0fcf50317a9f4b51 | NOT_APPLICABLE | NOT_APPLICABLE | static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
hwsim_set_chanctx_magic(ctx);
wiphy_dbg(hw->wiphy,
"add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n",
ctx->def.chan->center_freq, ctx->def.width,
ctx->def.center_freq1, ctx->def.center_freq2);
return 0;
}
| 0 |
linux | 0ad646c81b2182f7fa67ec0c8c825e0ee165696d | NOT_APPLICABLE | NOT_APPLICABLE | static void tun_flow_cleanup(unsigned long data)
{
struct tun_struct *tun = (struct tun_struct *)data;
unsigned long delay = tun->ageing_time;
unsigned long next_timer = jiffies + delay;
unsigned long count = 0;
int i;
tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
spin_lock_bh(&tun->lock);
for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
struct tun_flow_entry *e;
struct hlist_node *n;
hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
unsigned long this_timer;
count++;
this_timer = e->updated + delay;
if (time_before_eq(this_timer, jiffies))
tun_flow_delete(tun, e);
else if (time_before(this_timer, next_timer))
next_timer = this_timer;
}
}
if (count)
mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
spin_unlock_bh(&tun->lock);
}
| 0 |
libsass | 8f40dc03e5ab5a8b2ebeb72b31f8d1adbb2fd6ae | NOT_APPLICABLE | NOT_APPLICABLE | void ADDCALL sass_file_context_set_options (struct Sass_File_Context* ctx, struct Sass_Options* opt) { copy_options(ctx, opt); } | 0 |
linux | e6bd18f57aad1a2d1ef40e646d03ed0f2515c9e3 | NOT_APPLICABLE | NOT_APPLICABLE | static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
u16 pkey)
{
int ret = -ENOENT, i, intable = 0;
struct hfi1_pportdata *ppd = uctxt->ppd;
struct hfi1_devdata *dd = uctxt->dd;
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
ret = -EINVAL;
goto done;
}
for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
if (pkey == ppd->pkeys[i]) {
intable = 1;
break;
}
if (intable)
ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
done:
return ret;
}
| 0 |
libvirt | 447f69dec47e1b0bd15ecd7cd49a9fd3b050fb87 | NOT_APPLICABLE | NOT_APPLICABLE | storageConnectNumOfStoragePools(virConnectPtr conn)
{
if (virConnectNumOfStoragePoolsEnsureACL(conn) < 0)
return -1;
return virStoragePoolObjNumOfStoragePools(driver->pools, conn, true,
virConnectNumOfStoragePoolsCheckACL);
} | 0 |
hhvm | 08193b7f0cd3910256e00d599f0f3eb2519c44ca | NOT_APPLICABLE | NOT_APPLICABLE | bool MemFile::closeImpl() {
*s_pcloseRet = 0;
setIsClosed(true);
if (m_malloced && m_data) {
free(m_data);
m_data = nullptr;
}
File::closeImpl();
return true;
} | 0 |
libxslt | 7ca19df892ca22d9314e95d59ce2abdeff46b617 | NOT_APPLICABLE | NOT_APPLICABLE | xsltIfComp(xsltStylesheetPtr style, xmlNodePtr inst) {
#ifdef XSLT_REFACTORED
xsltStyleItemIfPtr comp;
#else
xsltStylePreCompPtr comp;
#endif
if ((style == NULL) || (inst == NULL) || (inst->type != XML_ELEMENT_NODE))
return;
#ifdef XSLT_REFACTORED
comp = (xsltStyleItemIfPtr)
xsltNewStylePreComp(style, XSLT_FUNC_IF);
#else
comp = xsltNewStylePreComp(style, XSLT_FUNC_IF);
#endif
if (comp == NULL)
return;
inst->psvi = comp;
comp->inst = inst;
comp->test = xsltGetCNsProp(style, inst, (const xmlChar *)"test", XSLT_NAMESPACE);
if (comp->test == NULL) {
xsltTransformError(NULL, style, inst,
"xsl:if : test is not defined\n");
if (style != NULL) style->errors++;
return;
}
comp->comp = xsltXPathCompile(style, comp->test);
if (comp->comp == NULL) {
xsltTransformError(NULL, style, inst,
"xsl:if : could not compile test expression '%s'\n",
comp->test);
if (style != NULL) style->errors++;
}
} | 0 |
linux | a3727a8bac0a9e77c70820655fd8715523ba3db7 | NOT_APPLICABLE | NOT_APPLICABLE | static void smack_sock_graft(struct sock *sk, struct socket *parent)
{
struct socket_smack *ssp;
struct smack_known *skp = smk_of_current();
if (sk == NULL ||
(sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
return;
ssp = sk->sk_security;
ssp->smk_in = skp;
ssp->smk_out = skp;
/* cssp->smk_packet is already set in smack_inet_csk_clone() */
} | 0 |
linux | 04197b341f23b908193308b8d63d17ff23232598 | NOT_APPLICABLE | NOT_APPLICABLE | xfs_start_page_writeback(
struct page *page,
int clear_dirty)
{
ASSERT(PageLocked(page));
ASSERT(!PageWriteback(page));
/*
* if the page was not fully cleaned, we need to ensure that the higher
* layers come back to it correctly. That means we need to keep the page
* dirty, and for WB_SYNC_ALL writeback we need to ensure the
* PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
* write this page in this writeback sweep will be made.
*/
if (clear_dirty) {
clear_page_dirty_for_io(page);
set_page_writeback(page);
} else
set_page_writeback_keepwrite(page);
unlock_page(page);
}
| 0 |
linux | b4e00444cab4c3f3fec876dc0cccc8cbb0d1a948 | NOT_APPLICABLE | NOT_APPLICABLE | static __always_inline void delayed_free_task(struct task_struct *tsk)
{
if (IS_ENABLED(CONFIG_MEMCG))
call_rcu(&tsk->rcu, __delayed_free_task);
else
free_task(tsk);
} | 0 |
Chrome | 73edae623529f04c668268de49d00324b96166a2 | NOT_APPLICABLE | NOT_APPLICABLE | void HTMLElement::collectStyleForAttribute(const Attribute& attribute, StylePropertySet* style)
{
if (attribute.name() == alignAttr) {
if (equalIgnoringCase(attribute.value(), "middle"))
addPropertyToAttributeStyle(style, CSSPropertyTextAlign, CSSValueCenter);
else
addPropertyToAttributeStyle(style, CSSPropertyTextAlign, attribute.value());
} else if (attribute.name() == contenteditableAttr) {
if (attribute.isEmpty() || equalIgnoringCase(attribute.value(), "true")) {
addPropertyToAttributeStyle(style, CSSPropertyWebkitUserModify, CSSValueReadWrite);
addPropertyToAttributeStyle(style, CSSPropertyWordWrap, CSSValueBreakWord);
addPropertyToAttributeStyle(style, CSSPropertyWebkitNbspMode, CSSValueSpace);
addPropertyToAttributeStyle(style, CSSPropertyWebkitLineBreak, CSSValueAfterWhiteSpace);
} else if (equalIgnoringCase(attribute.value(), "plaintext-only")) {
addPropertyToAttributeStyle(style, CSSPropertyWebkitUserModify, CSSValueReadWritePlaintextOnly);
addPropertyToAttributeStyle(style, CSSPropertyWordWrap, CSSValueBreakWord);
addPropertyToAttributeStyle(style, CSSPropertyWebkitNbspMode, CSSValueSpace);
addPropertyToAttributeStyle(style, CSSPropertyWebkitLineBreak, CSSValueAfterWhiteSpace);
} else if (equalIgnoringCase(attribute.value(), "false"))
addPropertyToAttributeStyle(style, CSSPropertyWebkitUserModify, CSSValueReadOnly);
} else if (attribute.name() == hiddenAttr) {
addPropertyToAttributeStyle(style, CSSPropertyDisplay, CSSValueNone);
} else if (attribute.name() == draggableAttr) {
if (equalIgnoringCase(attribute.value(), "true")) {
addPropertyToAttributeStyle(style, CSSPropertyWebkitUserDrag, CSSValueElement);
addPropertyToAttributeStyle(style, CSSPropertyWebkitUserSelect, CSSValueNone);
} else if (equalIgnoringCase(attribute.value(), "false"))
addPropertyToAttributeStyle(style, CSSPropertyWebkitUserDrag, CSSValueNone);
} else if (attribute.name() == dirAttr) {
if (equalIgnoringCase(attribute.value(), "auto"))
addPropertyToAttributeStyle(style, CSSPropertyUnicodeBidi, unicodeBidiAttributeForDirAuto(this));
else {
addPropertyToAttributeStyle(style, CSSPropertyDirection, attribute.value());
if (!hasTagName(bdiTag) && !hasTagName(bdoTag) && !hasTagName(outputTag))
addPropertyToAttributeStyle(style, CSSPropertyUnicodeBidi, CSSValueEmbed);
}
} else if (attribute.name().matches(XMLNames::langAttr)) {
mapLanguageAttributeToLocale(attribute, style);
} else if (attribute.name() == langAttr) {
if (!fastHasAttribute(XMLNames::langAttr))
mapLanguageAttributeToLocale(attribute, style);
} else
StyledElement::collectStyleForAttribute(attribute, style);
}
| 0 |
linux | cabfb3680f78981d26c078a26e5c748531257ebb | NOT_APPLICABLE | NOT_APPLICABLE | create_reconnect_durable_v2_buf(struct cifs_fid *fid)
{
struct create_durable_handle_reconnect_v2 *buf;
buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
GFP_KERNEL);
if (!buf)
return NULL;
buf->ccontext.DataOffset =
cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
dcontext));
buf->ccontext.DataLength =
cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
buf->ccontext.NameOffset =
cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
Name));
buf->ccontext.NameLength = cpu_to_le16(4);
buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
buf->Name[0] = 'D';
buf->Name[1] = 'H';
buf->Name[2] = '2';
buf->Name[3] = 'C';
return buf;
}
| 0 |
linux | 3f6f1480d86bf9fc16c160d803ab1d006e3058d5 | NOT_APPLICABLE | NOT_APPLICABLE | int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 ||
(mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative)
ctxt->memopp->addr.mem.ea += ctxt->_eip;
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
| 0 |
Chrome | 11a4cc4a6d6e665d9a118fada4b7c658d6f70d95 | NOT_APPLICABLE | NOT_APPLICABLE | static int cornerStart(const RenderStyle* style, int minX, int maxX, int thickness)
{
if (style->shouldPlaceBlockDirectionScrollbarOnLogicalLeft())
return minX + style->borderLeftWidth();
return maxX - thickness - style->borderRightWidth();
}
| 0 |
Chrome | 36773850210becda3d76f27285ecd899fafdfc72 | NOT_APPLICABLE | NOT_APPLICABLE | static void setUpDialog(DOMWindow* dialog, void* handler)
{
static_cast<DialogHandler*>(handler)->dialogCreated(dialog);
}
| 0 |
linux | 04f5866e41fb70690e28397487d8bd8eea7d712a | NOT_APPLICABLE | NOT_APPLICABLE | void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
struct ib_uverbs_completion_event_file *ev_file,
struct ib_ucq_object *uobj)
{
struct ib_uverbs_event *evt, *tmp;
if (ev_file) {
spin_lock_irq(&ev_file->ev_queue.lock);
list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
list_del(&evt->list);
kfree(evt);
}
spin_unlock_irq(&ev_file->ev_queue.lock);
uverbs_uobject_put(&ev_file->uobj);
}
spin_lock_irq(&file->async_file->ev_queue.lock);
list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
list_del(&evt->list);
kfree(evt);
}
spin_unlock_irq(&file->async_file->ev_queue.lock);
}
| 0 |
tensorflow | 240655511cd3e701155f944a972db71b6c0b1bb6 | NOT_APPLICABLE | NOT_APPLICABLE | bool HasTPUAttributes(const NodeDef& node) {
AttrSlice attrs(node);
for (const auto& attr : attrs) {
if (attr.first.find("_tpu_") != attr.first.npos) {
return true;
}
}
return false;
} | 0 |
radare2 | 40b021ba29c8f90ccf7c879fde2580bc73a17e8e | NOT_APPLICABLE | NOT_APPLICABLE | static void r_bin_mdmp_free_pe32_bin(void *pe_bin_) {
struct Pe32_r_bin_mdmp_pe_bin *pe_bin = pe_bin_;
if (pe_bin) {
sdb_free (pe_bin->bin->kv);
Pe32_r_bin_pe_free (pe_bin->bin);
R_FREE (pe_bin);
}
}
| 0 |
openmpt | 492022c7297ede682161d9c0ec2de15526424e76 | NOT_APPLICABLE | NOT_APPLICABLE | PLUGINDEX CSoundFile::GetChannelPlugin(CHANNELINDEX nChn, PluginMutePriority respectMutes) const
{
const ModChannel &channel = m_PlayState.Chn[nChn];
PLUGINDEX nPlugin;
if((respectMutes == RespectMutes && channel.dwFlags[CHN_MUTE]) || channel.dwFlags[CHN_NOFX])
{
nPlugin = 0;
} else
{
if (nChn >= m_nChannels && channel.nMasterChn > 0)
{
nChn = channel.nMasterChn - 1;
}
if(nChn < MAX_BASECHANNELS)
{
nPlugin = ChnSettings[nChn].nMixPlugin;
} else
{
nPlugin = 0;
}
}
return nPlugin;
}
| 0 |
linux | 81cdb259fb6d8c1c4ecfeea389ff5a73c07f5755 | NOT_APPLICABLE | NOT_APPLICABLE | static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
int i;
struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
eoi_inject.work);
spin_lock(&ioapic->lock);
for (i = 0; i < IOAPIC_NUM_PINS; i++) {
union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
continue;
if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
ioapic_service(ioapic, i, false);
}
spin_unlock(&ioapic->lock);
}
| 0 |
linux | fb58fdcd295b914ece1d829b24df00a17a9624bc | NOT_APPLICABLE | NOT_APPLICABLE | static void intel_iommu_domain_free(struct iommu_domain *domain)
{
domain_exit(to_dmar_domain(domain));
} | 0 |
linux | 81f9c4e4177d31ced6f52a89bb70e93bfb77ca03 | NOT_APPLICABLE | NOT_APPLICABLE | static void *s_start(struct seq_file *m, loff_t *pos)
{
struct trace_iterator *iter = m->private;
struct trace_array *tr = iter->tr;
int cpu_file = iter->cpu_file;
void *p = NULL;
loff_t l = 0;
int cpu;
/*
* copy the tracer to avoid using a global lock all around.
* iter->trace is a copy of current_trace, the pointer to the
* name may be used instead of a strcmp(), as iter->trace->name
* will point to the same string as current_trace->name.
*/
mutex_lock(&trace_types_lock);
if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
*iter->trace = *tr->current_trace;
mutex_unlock(&trace_types_lock);
#ifdef CONFIG_TRACER_MAX_TRACE
if (iter->snapshot && iter->trace->use_max_tr)
return ERR_PTR(-EBUSY);
#endif
if (!iter->snapshot)
atomic_inc(&trace_record_taskinfo_disabled);
if (*pos != iter->pos) {
iter->ent = NULL;
iter->cpu = 0;
iter->idx = -1;
if (cpu_file == RING_BUFFER_ALL_CPUS) {
for_each_tracing_cpu(cpu)
tracing_iter_reset(iter, cpu);
} else
tracing_iter_reset(iter, cpu_file);
iter->leftover = 0;
for (p = iter; p && l < *pos; p = s_next(m, p, &l))
;
} else {
/*
* If we overflowed the seq_file before, then we want
* to just reuse the trace_seq buffer again.
*/
if (iter->leftover)
p = iter;
else {
l = *pos - 1;
p = s_next(m, p, &l);
}
}
trace_event_read_lock();
trace_access_lock(cpu_file);
return p;
}
| 0 |
linux | b4487b93545214a9db8cbf32e86411677b0cca21 | NOT_APPLICABLE | NOT_APPLICABLE | static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_GETFH, decode_getfh_maxsz, hdr);
} | 0 |
Chrome | 0ab2412a104d2f235d7b9fe19d30ef605a410832 | NOT_APPLICABLE | NOT_APPLICABLE | bool Document::CanCreateHistoryEntry() const {
if (!GetSettings() || !GetSettings()->GetHistoryEntryRequiresUserGesture())
return true;
if (frame_->HasReceivedUserGesture())
return true;
return ElapsedTime() >= kElapsedTimeForHistoryEntryWithoutUserGestureMS;
}
| 0 |
hhvm | 08193b7f0cd3910256e00d599f0f3eb2519c44ca | NOT_APPLICABLE | NOT_APPLICABLE | bool HHVM_FUNCTION(imagegammacorrect, const Resource& image,
double inputgamma, double outputgamma) {
gdImagePtr im = get_valid_image_resource(image);
if (!im) return false;
if (inputgamma <= 0.0 || outputgamma <= 0.0) {
raise_warning("Gamma values should be positive");
return false;
}
if (gdImageTrueColor(im)) {
int x, y, c;
for (y = 0; y < gdImageSY(im); y++) {
for (x = 0; x < gdImageSX(im); x++) {
c = gdImageGetPixel(im, x, y);
gdImageSetPixel(im, x, y,
gdTrueColor((int)((pow((pow((gdTrueColorGetRed(c)/255.0),
inputgamma)),1.0/outputgamma)*255) + .5),
(int)((pow((pow((gdTrueColorGetGreen(c)/255.0),
inputgamma)),1.0/outputgamma) * 255) + .5),
(int)((pow((pow((gdTrueColorGetBlue(c)/255.0),
inputgamma)),1.0/outputgamma) * 255) + .5)));
}
}
return true;
}
for (int i = 0; i < gdImageColorsTotal(im); i++) {
im->red[i] = (int)((pow((pow((im->red[i]/255.0), inputgamma)),
1.0/outputgamma)*255) + .5);
im->green[i] = (int)((pow((pow((im->green[i]/255.0), inputgamma)),
1.0/outputgamma)*255) + .5);
im->blue[i] = (int)((pow((pow((im->blue[i]/255.0), inputgamma)),
1.0/outputgamma)*255) + .5);
}
return true;
} | 0 |
elog | 993bed4923c88593cc6b1186e0d1b9564994a25a | NOT_APPLICABLE | NOT_APPLICABLE | char *month_name(int m)
/* return name of month in current locale, m=0..11 */
{
struct tm ts;
static char name[32];
memset(&ts, 0, sizeof(ts));
ts.tm_mon = m;
ts.tm_mday = 15;
ts.tm_year = 2000;
mktime(&ts);
strftime(name, sizeof(name), "%B", &ts);
return name;
} | 0 |