Dataset schema (fields appear in this order in each record below):
project: stringclasses (791 values)
commit_id: stringlengths (6 to 81)
CVE ID: stringlengths (13 to 16)
CWE ID: stringclasses (127 values)
func: stringlengths (5 to 484k)
vul: int8 (0 or 1)
linux
fa40d9734a57bcbfa79a280189799f76c88f7bb0
NOT_APPLICABLE
NOT_APPLICABLE
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey, u16 gen, u8 mode, u32 dnode) { struct sk_buff_head pkts; struct tipc_msg *hdr; struct sk_buff *skb; u16 size, cong_link_cnt; u8 *data; int rc; size = tipc_aead_key_size(skey); skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC); if (!skb) return -ENOMEM; hdr = buf_msg(skb); tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG, INT_H_SIZE, dnode); msg_set_size(hdr, INT_H_SIZE + size); msg_set_key_gen(hdr, gen); msg_set_key_mode(hdr, mode); data = msg_data(hdr); *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen); memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME); memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key, skey->keylen); __skb_queue_head_init(&pkts); __skb_queue_tail(&pkts, skb); if (dnode) rc = tipc_node_xmit(net, &pkts, dnode, 0); else rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt); return rc; }
0
libvirt
1ac703a7d0789e46833f4013a3876c2e3af18ec7
NOT_APPLICABLE
NOT_APPLICABLE
qemuProcessHandlePMSuspend(qemuMonitorPtr mon G_GNUC_UNUSED, virDomainObjPtr vm, void *opaque) { virQEMUDriverPtr driver = opaque; virObjectEventPtr event = NULL; virObjectEventPtr lifecycleEvent = NULL; g_autoptr(virQEMUDriverConfig) cfg = virQEMUDriverGetConfig(driver); virObjectLock(vm); event = virDomainEventPMSuspendNewFromObj(vm); if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { qemuDomainObjPrivatePtr priv = vm->privateData; VIR_DEBUG("Transitioned guest %s to pmsuspended state due to " "QMP suspend event", vm->def->name); virDomainObjSetState(vm, VIR_DOMAIN_PMSUSPENDED, VIR_DOMAIN_PMSUSPENDED_UNKNOWN); lifecycleEvent = virDomainEventLifecycleNewFromObj(vm, VIR_DOMAIN_EVENT_PMSUSPENDED, VIR_DOMAIN_EVENT_PMSUSPENDED_MEMORY); if (virDomainObjSave(vm, driver->xmlopt, cfg->stateDir) < 0) { VIR_WARN("Unable to save status on vm %s after suspend event", vm->def->name); } if (priv->agent) qemuAgentNotifyEvent(priv->agent, QEMU_AGENT_EVENT_SUSPEND); } virObjectUnlock(vm); virObjectEventStateQueue(driver->domainEventState, event); virObjectEventStateQueue(driver->domainEventState, lifecycleEvent); return 0; }
0
wireshark
2c13e97d656c1c0ac4d76eb9d307664aae0e0cf7
NOT_APPLICABLE
NOT_APPLICABLE
check_rpcap_heur (tvbuff_t *tvb, gboolean tcp) { gint offset = 0; guint8 version, msg_type; guint16 msg_value; guint32 plen, len, caplen; if (tvb_captured_length (tvb) < 8) /* Too short */ return FALSE; version = tvb_get_guint8 (tvb, offset); if (version != 0) /* Incorrect version */ return FALSE; offset++; msg_type = tvb_get_guint8 (tvb, offset); if (!tcp && msg_type != 7) { /* UDP is only used for packets */ return FALSE; } if (try_val_to_str(msg_type, message_type) == NULL) /* Unknown message type */ return FALSE; offset++; msg_value = tvb_get_ntohs (tvb, offset); if (msg_value > 0) { if (msg_type == RPCAP_MSG_ERROR) { /* Must have a valid error code */ if (try_val_to_str(msg_value, error_codes) == NULL) return FALSE; } else if (msg_type != RPCAP_MSG_FINDALLIF_REPLY) { return FALSE; } } offset += 2; plen = tvb_get_ntohl (tvb, offset); offset += 4; len = (guint32) tvb_reported_length_remaining (tvb, offset); switch (msg_type) { case RPCAP_MSG_FINDALLIF_REQ: case RPCAP_MSG_UPDATEFILTER_REPLY: case RPCAP_MSG_AUTH_REPLY: case RPCAP_MSG_STATS_REQ: case RPCAP_MSG_CLOSE: case RPCAP_MSG_SETSAMPLING_REPLY: case RPCAP_MSG_ENDCAP_REQ: case RPCAP_MSG_ENDCAP_REPLY: /* Empty payload */ if (plen != 0 || len != 0) return FALSE; break; case RPCAP_MSG_OPEN_REPLY: case RPCAP_MSG_STARTCAP_REPLY: case RPCAP_MSG_SETSAMPLING_REQ: /* Always 8 bytes */ if (plen != 8 || len != 8) return FALSE; break; case RPCAP_MSG_STATS_REPLY: /* Always 16 bytes */ if (plen != 16 || len != 16) return FALSE; break; case RPCAP_MSG_PACKET: /* Must have the frame header */ if (plen < 20) return FALSE; /* Check if capture length is valid */ caplen = tvb_get_ntohl (tvb, offset+8); /* Always 20 bytes less than packet length */ if (caplen != (plen - 20) || caplen > 65535) return FALSE; break; case RPCAP_MSG_FINDALLIF_REPLY: case RPCAP_MSG_ERROR: case RPCAP_MSG_OPEN_REQ: case RPCAP_MSG_STARTCAP_REQ: case RPCAP_MSG_UPDATEFILTER_REQ: case RPCAP_MSG_AUTH_REQ: /* Variable length */ if (plen != len) return FALSE; break; default: /* Unknown message type */ return FALSE; } return TRUE; }
0
linux-2.6
0f13864e5b24d9cbe18d125d41bfa4b726a82e40
NOT_APPLICABLE
NOT_APPLICABLE
isdn_net_ciscohdlck_slarp_send_request(isdn_net_local *lp) { struct sk_buff *skb; unsigned char *p; skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14); if (!skb) return; p = skb_put(skb, 4 + 14); /* cisco header */ p += put_u8 (p, CISCO_ADDR_UNICAST); p += put_u8 (p, CISCO_CTRL); p += put_u16(p, CISCO_TYPE_SLARP); /* slarp request */ p += put_u32(p, CISCO_SLARP_REQUEST); p += put_u32(p, 0); // address p += put_u32(p, 0); // netmask p += put_u16(p, 0); // unused isdn_net_write_super(lp, skb); }
0
linux
a8b33654b1e3b0c74d4a1fed041c9aae50b3c427
NOT_APPLICABLE
NOT_APPLICABLE
static void mp_close(struct tty_struct *tty, struct file *filp) { struct sb_uart_state *state = tty->driver_data; struct sb_uart_port *port; printk("mp_close!\n"); if (!state || !state->port) return; port = state->port; printk("close1 %d\n", __LINE__); MP_STATE_LOCK(state); printk("close2 %d\n", __LINE__); if (tty_hung_up_p(filp)) goto done; printk("close3 %d\n", __LINE__); if ((tty->count == 1) && (state->count != 1)) { printk("mp_close: bad serial port count; tty->count is 1, " "state->count is %d\n", state->count); state->count = 1; } printk("close4 %d\n", __LINE__); if (--state->count < 0) { printk("rs_close: bad serial port count for ttyMP%d: %d\n", port->line, state->count); state->count = 0; } if (state->count) goto done; tty->closing = 1; printk("close5 %d\n", __LINE__); if (state->closing_wait != USF_CLOSING_WAIT_NONE) tty_wait_until_sent(tty, state->closing_wait); printk("close6 %d\n", __LINE__); if (state->info->flags & UIF_INITIALIZED) { unsigned long flags; spin_lock_irqsave(&port->lock, flags); port->ops->stop_rx(port); spin_unlock_irqrestore(&port->lock, flags); mp_wait_until_sent(tty, port->timeout); } printk("close7 %d\n", __LINE__); mp_shutdown(state); printk("close8 %d\n", __LINE__); mp_flush_buffer(tty); tty_ldisc_flush(tty); tty->closing = 0; state->info->tty = NULL; if (state->info->blocked_open) { if (state->close_delay) { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(state->close_delay); } } else { mp_change_pm(state, 3); } printk("close8 %d\n", __LINE__); state->info->flags &= ~UIF_NORMAL_ACTIVE; wake_up_interruptible(&state->info->open_wait); done: printk("close done\n"); MP_STATE_UNLOCK(state); module_put(THIS_MODULE); }
0
Chrome
fcd3a7a671ecf2d5f46ea34787d27507a914d2f5
NOT_APPLICABLE
NOT_APPLICABLE
GetSyncableServiceForType(syncable::ModelType type) { if (!profile_) { // For tests. return base::WeakPtr<SyncableService>(); } switch (type) { case syncable::PREFERENCES: return profile_->GetPrefs()->GetSyncableService()->AsWeakPtr(); case syncable::AUTOFILL: case syncable::AUTOFILL_PROFILE: { if (!web_data_service_.get()) return base::WeakPtr<SyncableService>(); if (type == syncable::AUTOFILL) { return web_data_service_->GetAutocompleteSyncableService()->AsWeakPtr(); } else { return web_data_service_-> GetAutofillProfileSyncableService()->AsWeakPtr(); } } case syncable::APPS: case syncable::EXTENSIONS: return extension_system_->extension_service()->AsWeakPtr(); case syncable::SEARCH_ENGINES: return TemplateURLServiceFactory::GetForProfile(profile_)->AsWeakPtr(); case syncable::APP_SETTINGS: case syncable::EXTENSION_SETTINGS: return extension_system_->extension_service()->settings_frontend()-> GetBackendForSync(type)->AsWeakPtr(); case syncable::APP_NOTIFICATIONS: return extension_system_->extension_service()-> app_notification_manager()->AsWeakPtr(); default: NOTREACHED(); return base::WeakPtr<SyncableService>(); } }
0
mongo
64095239f41e9f3841d8be9088347db56d35c891
NOT_APPLICABLE
NOT_APPLICABLE
TEST(LtOp, MatchesNull) { BSONObj operand = BSON("$lt" << BSONNULL); LTMatchExpression lt("a", operand["$lt"]); ASSERT(!lt.matchesBSON(BSONObj(), NULL)); ASSERT(!lt.matchesBSON(BSON("a" << BSONNULL), NULL)); ASSERT(!lt.matchesBSON(BSON("a" << 4), NULL)); // A non-existent field is treated same way as an empty bson object ASSERT(!lt.matchesBSON(BSON("b" << 4), NULL)); }
0
tensorflow
87158f43f05f2720a374f3e6d22a7aaa3a33f750
NOT_APPLICABLE
NOT_APPLICABLE
static void Run(OpKernelContext *ctx, typename TTypes<T>::Scalar &s, const typename TTypes<T>::UnalignedVec &v) { s.device(ctx->eigen_cpu_device()) = v.sum(); }
0
openvpn
11d21349a4e7e38a025849479b36ace7c2eec2ee
NOT_APPLICABLE
NOT_APPLICABLE
md5_digest_equal (const struct md5_digest *d1, const struct md5_digest *d2) { return memcmp(d1->digest, d2->digest, MD5_DIGEST_LENGTH) == 0; }
0
OpenJK
11a83410153756ae350a82ed41b08d128ff7f998
NOT_APPLICABLE
NOT_APPLICABLE
void Com_AppendCDKey( const char *filename ) { fileHandle_t f; char buffer[33]; char fbuffer[MAX_OSPATH]; Com_sprintf(fbuffer, sizeof(fbuffer), "%s/rtcwkey", filename); FS_SV_FOpenFileRead( fbuffer, &f ); if ( !f ) { Q_strncpyz( &cl_cdkey[16], " ", 17 ); return; } Com_Memset( buffer, 0, sizeof( buffer ) ); FS_Read( buffer, 16, f ); FS_FCloseFile( f ); if ( CL_CDKeyValidate( buffer, NULL ) ) { strcat( &cl_cdkey[16], buffer ); } else { Q_strncpyz( &cl_cdkey[16], " ", 17 ); } }
0
Chrome
f045c704568e9cf6279b3cbccbec6d86c35f8a13
NOT_APPLICABLE
NOT_APPLICABLE
void FileSystemManagerImpl::OnConnectionError() { DCHECK_CURRENTLY_ON(BrowserThread::IO); if (bindings_.empty()) { in_transit_snapshot_files_.Clear(); operation_runner_.reset(); cancellable_operations_.CloseAllBindings(); } }
0
Chrome
c71d8045ce0592cf3f4290744ab57b23c1d1b4c6
NOT_APPLICABLE
NOT_APPLICABLE
void RenderFrameDevToolsAgentHost::RenderFrameHostChanged( RenderFrameHost* old_host, RenderFrameHost* new_host) { if (old_host != frame_host_) return; UpdateFrameHost(nullptr); }
0
kvm
e42d9b8141d1f54ff72ad3850bb110c95a5f3b88
NOT_APPLICABLE
NOT_APPLICABLE
static void decode_register_operand(struct operand *op, struct decode_cache *c, int inhibit_bytereg) { unsigned reg = c->modrm_reg; int highbyte_regs = c->rex_prefix == 0; if (!(c->d & ModRM)) reg = (c->b & 7) | ((c->rex_prefix & 1) << 3); op->type = OP_REG; if ((c->d & ByteOp) && !inhibit_bytereg) { op->ptr = decode_register(reg, c->regs, highbyte_regs); op->val = *(u8 *)op->ptr; op->bytes = 1; } else { op->ptr = decode_register(reg, c->regs, 0); op->bytes = c->op_bytes; switch (op->bytes) { case 2: op->val = *(u16 *)op->ptr; break; case 4: op->val = *(u32 *)op->ptr; break; case 8: op->val = *(u64 *) op->ptr; break; } } op->orig_val = op->val; }
0
Chrome
b276d0570cc816bfe25b431f2ee9bc265a6ad478
NOT_APPLICABLE
NOT_APPLICABLE
std::string TestURLLoader::TestTrustedCorbEligibleRequest() { std::string cross_origin_url = GetReachableCrossOriginURL("corb_eligible_resource.json"); pp::URLRequestInfo request(instance_); request.SetURL(cross_origin_url); request.SetAllowCrossOriginRequests(true); std::string response_body; int32_t rv = OpenTrusted(request, &response_body); if (rv != PP_OK) return ReportError("Trusted CORB-eligible request failed", rv); ASSERT_EQ("{ \"foo\": \"bar\" }\n", response_body); PASS(); }
0
linux
0625b4ba1a5d4703c7fb01c497bd6c156908af00
NOT_APPLICABLE
NOT_APPLICABLE
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *verbs_init_attr, struct ib_udata *udata) { struct mlx5_ib_dev *dev; struct mlx5_ib_qp *qp; u16 xrcdn = 0; int err; struct ib_qp_init_attr mlx_init_attr; struct ib_qp_init_attr *init_attr = verbs_init_attr; if (pd) { dev = to_mdev(pd->device); if (init_attr->qp_type == IB_QPT_RAW_PACKET) { if (!pd->uobject) { mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n"); return ERR_PTR(-EINVAL); } else if (!to_mucontext(pd->uobject->context)->cqe_version) { mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n"); return ERR_PTR(-EINVAL); } } } else { /* being cautious here */ if (init_attr->qp_type != IB_QPT_XRC_TGT && init_attr->qp_type != MLX5_IB_QPT_REG_UMR) { pr_warn("%s: no PD for transport %s\n", __func__, ib_qp_type_str(init_attr->qp_type)); return ERR_PTR(-EINVAL); } dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); } if (init_attr->qp_type == IB_QPT_DRIVER) { struct mlx5_ib_create_qp ucmd; init_attr = &mlx_init_attr; memcpy(init_attr, verbs_init_attr, sizeof(*verbs_init_attr)); err = set_mlx_qp_type(dev, init_attr, &ucmd, udata); if (err) return ERR_PTR(err); if (init_attr->qp_type == MLX5_IB_QPT_DCI) { if (init_attr->cap.max_recv_wr || init_attr->cap.max_recv_sge) { mlx5_ib_dbg(dev, "DCI QP requires zero size receive queue\n"); return ERR_PTR(-EINVAL); } } else { return mlx5_ib_create_dct(pd, init_attr, &ucmd); } } switch (init_attr->qp_type) { case IB_QPT_XRC_TGT: case IB_QPT_XRC_INI: if (!MLX5_CAP_GEN(dev->mdev, xrc)) { mlx5_ib_dbg(dev, "XRC not supported\n"); return ERR_PTR(-ENOSYS); } init_attr->recv_cq = NULL; if (init_attr->qp_type == IB_QPT_XRC_TGT) { xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn; init_attr->send_cq = NULL; } /* fall through */ case IB_QPT_RAW_PACKET: case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_UD: case IB_QPT_SMI: case MLX5_IB_QPT_HW_GSI: case MLX5_IB_QPT_REG_UMR: case MLX5_IB_QPT_DCI: qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); err = create_qp_common(dev, pd, init_attr, udata, qp); if (err) { mlx5_ib_dbg(dev, "create_qp_common failed\n"); kfree(qp); return ERR_PTR(err); } if (is_qp0(init_attr->qp_type)) qp->ibqp.qp_num = 0; else if (is_qp1(init_attr->qp_type)) qp->ibqp.qp_num = 1; else qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn; mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1, init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1); qp->trans_qp.xrcdn = xrcdn; break; case IB_QPT_GSI: return mlx5_ib_gsi_create_qp(pd, init_attr); case IB_QPT_RAW_IPV6: case IB_QPT_RAW_ETHERTYPE: case IB_QPT_MAX: default: mlx5_ib_dbg(dev, "unsupported qp type %d\n", init_attr->qp_type); /* Don't support raw QPs */ return ERR_PTR(-EINVAL); } if (verbs_init_attr->qp_type == IB_QPT_DRIVER) qp->qp_sub_type = init_attr->qp_type; return &qp->ibqp; }
0
linux-2.6
9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8
NOT_APPLICABLE
NOT_APPLICABLE
int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait) { int err; struct mfc_cache *cache; struct rtable *rt = (struct rtable*)skb->dst; read_lock(&mrt_lock); cache = ipmr_cache_find(rt->rt_src, rt->rt_dst); if (cache==NULL) { struct net_device *dev; int vif; if (nowait) { read_unlock(&mrt_lock); return -EAGAIN; } dev = skb->dev; if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) { read_unlock(&mrt_lock); return -ENODEV; } skb->nh.raw = skb_push(skb, sizeof(struct iphdr)); skb->nh.iph->ihl = sizeof(struct iphdr)>>2; skb->nh.iph->saddr = rt->rt_src; skb->nh.iph->daddr = rt->rt_dst; skb->nh.iph->version = 0; err = ipmr_cache_unresolved(vif, skb); read_unlock(&mrt_lock); return err; } if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY)) cache->mfc_flags |= MFC_NOTIFY; err = ipmr_fill_mroute(skb, cache, rtm); read_unlock(&mrt_lock); return err; }
0
linux
bcf3b67d16a4c8ffae0aa79de5853435e683945c
NOT_APPLICABLE
NOT_APPLICABLE
inline int megasas_cmd_type(struct scsi_cmnd *cmd) { int ret; switch (cmd->cmnd[0]) { case READ_10: case WRITE_10: case READ_12: case WRITE_12: case READ_6: case WRITE_6: case READ_16: case WRITE_16: ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? READ_WRITE_LDIO : READ_WRITE_SYSPDIO; break; default: ret = (MEGASAS_IS_LOGICAL(cmd->device)) ? NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO; } return ret; }
0
linux
193e87143c290ec16838f5368adc0e0bc94eb931
NOT_APPLICABLE
NOT_APPLICABLE
static void cqspi_controller_init(struct cqspi_st *cqspi) { cqspi_controller_enable(cqspi, 0); /* Configure the remap address register, no remap */ writel(0, cqspi->iobase + CQSPI_REG_REMAP); /* Disable all interrupts. */ writel(0, cqspi->iobase + CQSPI_REG_IRQMASK); /* Configure the SRAM split to 1:1 . */ writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION); /* Load indirect trigger address. */ writel(cqspi->trigger_address, cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER); /* Program read watermark -- 1/2 of the FIFO. */ writel(cqspi->fifo_depth * cqspi->fifo_width / 2, cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK); /* Program write watermark -- 1/8 of the FIFO. */ writel(cqspi->fifo_depth * cqspi->fifo_width / 8, cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK); cqspi_controller_enable(cqspi, 1); }
0
Chrome
6d9425ec7badda912555d46ea7abcfab81fdd9b9
NOT_APPLICABLE
NOT_APPLICABLE
SynchronousCompositorOutputSurface::~SynchronousCompositorOutputSurface() {}
0
Android
04839626ed859623901ebd3a5fd483982186b59d
CVE-2016-1621
CWE-119
const Block* SimpleBlock::GetBlock() const { return &m_block; }
1
exiv2
c72d16f4c402a8acc2dfe06fe3d58bf6cf99069e
NOT_APPLICABLE
NOT_APPLICABLE
void TiffDataEntry::doAccept(TiffVisitor& visitor) { visitor.visitDataEntry(this); } // TiffDataEntry::doAccept
0
zziplib
596d9dfce2624e849417d4301e8d67935608aa5e
NOT_APPLICABLE
NOT_APPLICABLE
zzip_disk_findfile(ZZIP_DISK* disk, char* filename, struct zzip_disk_entry* after, zzip_strcmp_fn_t compare) { struct zzip_disk_entry* entry = (! after ? zzip_disk_findfirst (disk) : zzip_disk_findnext (disk, after)); if (! compare) compare = (zzip_strcmp_fn_t)( (disk->flags&1) ? (_zzip_strcasecmp) : (strcmp)); for (; entry ; entry = zzip_disk_findnext (disk, entry)) { /* filenames within zip files are often not null-terminated! */ char* realname = zzip_disk_entry_strdup_name (disk, entry); if (realname && ! compare(filename, realname)) { free (realname); return entry; } free (realname); } return 0; }
0
tcpdump
5edf405d7ed9fc92f4f43e8a3d44baa4c6387562
NOT_APPLICABLE
NOT_APPLICABLE
handle_assoc_request(netdissect_options *ndo, const u_char *p, u_int length) { struct mgmt_body_t pbody; int offset = 0; int ret; memset(&pbody, 0, sizeof(pbody)); if (!ND_TTEST2(*p, IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN)) return 0; if (length < IEEE802_11_CAPINFO_LEN + IEEE802_11_LISTENINT_LEN) return 0; pbody.capability_info = EXTRACT_LE_16BITS(p); offset += IEEE802_11_CAPINFO_LEN; length -= IEEE802_11_CAPINFO_LEN; pbody.listen_interval = EXTRACT_LE_16BITS(p+offset); offset += IEEE802_11_LISTENINT_LEN; length -= IEEE802_11_LISTENINT_LEN; ret = parse_elements(ndo, &pbody, p, offset, length); PRINT_SSID(pbody); PRINT_RATES(pbody); return ret; }
0
krb5
a7886f0ed1277c69142b14a2c6629175a6331edc
NOT_APPLICABLE
NOT_APPLICABLE
spnego_gss_set_name_attribute(OM_uint32 *minor_status, gss_name_t name, int complete, gss_buffer_t attr, gss_buffer_t value) { OM_uint32 ret; ret = gss_set_name_attribute(minor_status, name, complete, attr, value); return (ret); }
0
Chrome
c90c6ca59378d7e86d1a2f28fe96bada35df1508
NOT_APPLICABLE
NOT_APPLICABLE
void Browser::ShowOptionsTab(const std::string& sub_page) { GURL url(chrome::kChromeUISettingsURL + sub_page); ShowSingletonTab(url, true); }
0
linux
baff42ab1494528907bf4d5870359e31711746ae
NOT_APPLICABLE
NOT_APPLICABLE
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key) { struct scatterlist sg; sg_init_one(&sg, key->key, key->keylen); return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); }
0
Chrome
7da6c3419fd172405bcece1ae4ec6ec8316cd345
NOT_APPLICABLE
NOT_APPLICABLE
MockRenderWidgetHostImpl* CreateRenderWidgetHostForProcess( MockRenderProcessHost* process_host) { return MockRenderWidgetHostImpl::Create(render_widget_host_delegate(), process_host, process_host->GetNextRoutingID()); }
0
pgbouncer
4b92112b820830b30cd7bc91bef3dd8f35305525
NOT_APPLICABLE
NOT_APPLICABLE
static void for_each_server_filtered(PgPool *pool, void (*func)(PgSocket *sk), bool (*filter)(PgSocket *sk, void *arg), void *filter_arg) { struct List *item; PgSocket *sk; statlist_for_each(item, &pool->idle_server_list) { sk = container_of(item, PgSocket, head); if (filter(sk, filter_arg)) func(sk); } statlist_for_each(item, &pool->used_server_list) { sk = container_of(item, PgSocket, head); if (filter(sk, filter_arg)) func(sk); } statlist_for_each(item, &pool->tested_server_list) { sk = container_of(item, PgSocket, head); if (filter(sk, filter_arg)) func(sk); } statlist_for_each(item, &pool->active_server_list) { sk = container_of(item, PgSocket, head); if (filter(sk, filter_arg)) func(sk); } statlist_for_each(item, &pool->new_server_list) { sk = container_of(item, PgSocket, head); if (filter(sk, filter_arg)) func(sk); } }
0
libarchive
22531545514043e04633e1c015c7540b9de9dbe4
NOT_APPLICABLE
NOT_APPLICABLE
_archive_write_filter_count(struct archive *_a) { struct archive_write *a = (struct archive_write *)_a; struct archive_write_filter *p = a->filter_first; int count = 0; while(p) { count++; p = p->next_filter; } return count; }
0
linux
efa9ace68e487ddd29c2b4d6dd23242158f1f607
NOT_APPLICABLE
NOT_APPLICABLE
struct device_node *dlpar_configure_connector(__be32 drc_index, struct device_node *parent) { struct device_node *dn; struct device_node *first_dn = NULL; struct device_node *last_dn = NULL; struct property *property; struct property *last_property = NULL; struct cc_workarea *ccwa; char *data_buf; int cc_token; int rc = -1; cc_token = rtas_token("ibm,configure-connector"); if (cc_token == RTAS_UNKNOWN_SERVICE) return NULL; data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); if (!data_buf) return NULL; ccwa = (struct cc_workarea *)&data_buf[0]; ccwa->drc_index = drc_index; ccwa->zero = 0; do { /* Since we release the rtas_data_buf lock between configure * connector calls we want to re-populate the rtas_data_buffer * with the contents of the previous call. */ spin_lock(&rtas_data_buf_lock); memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE); rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL); memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); spin_unlock(&rtas_data_buf_lock); switch (rc) { case COMPLETE: break; case NEXT_SIBLING: dn = dlpar_parse_cc_node(ccwa); if (!dn) goto cc_error; dn->parent = last_dn->parent; last_dn->sibling = dn; last_dn = dn; break; case NEXT_CHILD: dn = dlpar_parse_cc_node(ccwa); if (!dn) goto cc_error; if (!first_dn) { dn->parent = parent; first_dn = dn; } else { dn->parent = last_dn; if (last_dn) last_dn->child = dn; } last_dn = dn; break; case NEXT_PROPERTY: property = dlpar_parse_cc_property(ccwa); if (!property) goto cc_error; if (!last_dn->properties) last_dn->properties = property; else last_property->next = property; last_property = property; break; case PREV_PARENT: last_dn = last_dn->parent; break; case CALL_AGAIN: break; case MORE_MEMORY: case ERR_CFG_USE: default: printk(KERN_ERR "Unexpected Error (%d) " "returned from configure-connector\n", rc); goto cc_error; } } while (rc); cc_error: kfree(data_buf); if (rc) { if (first_dn) dlpar_free_cc_nodes(first_dn); return NULL; } return first_dn; }
0
vim
d6c67629ed05aae436164eec474832daf8ba7420
NOT_APPLICABLE
NOT_APPLICABLE
qf_push_dir(char_u *dirbuf, struct dir_stack_T **stackptr, int is_file_stack) { struct dir_stack_T *ds_new; struct dir_stack_T *ds_ptr; // allocate new stack element and hook it in ds_new = ALLOC_ONE_ID(struct dir_stack_T, aid_qf_dirstack); if (ds_new == NULL) return NULL; ds_new->next = *stackptr; *stackptr = ds_new; // store directory on the stack if (vim_isAbsName(dirbuf) || (*stackptr)->next == NULL || is_file_stack) (*stackptr)->dirname = vim_strsave(dirbuf); else { // Okay we don't have an absolute path. // dirbuf must be a subdir of one of the directories on the stack. // Let's search... ds_new = (*stackptr)->next; (*stackptr)->dirname = NULL; while (ds_new) { vim_free((*stackptr)->dirname); (*stackptr)->dirname = concat_fnames(ds_new->dirname, dirbuf, TRUE); if (mch_isdir((*stackptr)->dirname) == TRUE) break; ds_new = ds_new->next; } // clean up all dirs we already left while ((*stackptr)->next != ds_new) { ds_ptr = (*stackptr)->next; (*stackptr)->next = (*stackptr)->next->next; vim_free(ds_ptr->dirname); vim_free(ds_ptr); } // Nothing found -> it must be on top level if (ds_new == NULL) { vim_free((*stackptr)->dirname); (*stackptr)->dirname = vim_strsave(dirbuf); } } if ((*stackptr)->dirname != NULL) return (*stackptr)->dirname; else { ds_ptr = *stackptr; *stackptr = (*stackptr)->next; vim_free(ds_ptr); return NULL; } }
0
linux
8b8a321ff72c785ed5e8b4cf6eda20b35d427390
NOT_APPLICABLE
NOT_APPLICABLE
int tcp_conn_request(struct request_sock_ops *rsk_ops, const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct sk_buff *skb) { struct tcp_fastopen_cookie foc = { .len = -1 }; __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn; struct tcp_options_received tmp_opt; struct tcp_sock *tp = tcp_sk(sk); struct sock *fastopen_sk = NULL; struct dst_entry *dst = NULL; struct request_sock *req; bool want_cookie = false; struct flowi fl; /* TW buckets are converted to open requests without * limitations, they conserve resources and peer is * evidently real one. */ if ((sysctl_tcp_syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) { want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name); if (!want_cookie) goto drop; } /* Accept backlog is full. If we have already queued enough * of warm entries in syn queue, drop request. It is better than * clogging syn queue with openreqs with exponentially increasing * timeout. */ if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); goto drop; } req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie); if (!req) goto drop; tcp_rsk(req)->af_specific = af_ops; tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = af_ops->mss_clamp; tmp_opt.user_mss = tp->rx_opt.user_mss; tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc); if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; tcp_openreq_init(req, &tmp_opt, skb, sk); /* Note: tcp_v6_init_req() might override ir_iif for link locals */ inet_rsk(req)->ir_iif = sk->sk_bound_dev_if; af_ops->init_req(req, sk, skb); if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; if (!want_cookie && !isn) { /* VJ's idea. We save last timestamp seen * from the destination in peer table, when entering * state TIME-WAIT, and check against it before * accepting new connection request. * * If "isn" is not zero, this request hit alive * timewait bucket, so that all the necessary checks * are made in the function processing timewait state. */ if (tcp_death_row.sysctl_tw_recycle) { bool strict; dst = af_ops->route_req(sk, &fl, req, &strict); if (dst && strict && !tcp_peer_is_proven(req, dst, true, tmp_opt.saw_tstamp)) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED); goto drop_and_release; } } /* Kill the following clause, if you dislike this way. */ else if (!sysctl_tcp_syncookies && (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < (sysctl_max_syn_backlog >> 2)) && !tcp_peer_is_proven(req, dst, false, tmp_opt.saw_tstamp)) { /* Without syncookies last quarter of * backlog is filled with destinations, * proven to be alive. * It means that we continue to communicate * to destinations, already remembered * to the moment of synflood. */ pr_drop_req(req, ntohs(tcp_hdr(skb)->source), rsk_ops->family); goto drop_and_release; } isn = af_ops->init_seq(skb); } if (!dst) { dst = af_ops->route_req(sk, &fl, req, NULL); if (!dst) goto drop_and_free; } tcp_ecn_create_request(req, skb, sk, dst); if (want_cookie) { isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); req->cookie_ts = tmp_opt.tstamp_ok; if (!tmp_opt.tstamp_ok) inet_rsk(req)->ecn_ok = 0; } tcp_rsk(req)->snt_isn = isn; tcp_rsk(req)->txhash = net_tx_rndhash(); tcp_openreq_init_rwin(req, sk, dst); if (!want_cookie) { tcp_reqsk_record_syn(sk, req, skb); fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); } if (fastopen_sk) { af_ops->send_synack(fastopen_sk, dst, &fl, req, &foc, false); /* Add the child socket directly into the accept queue */ inet_csk_reqsk_queue_add(sk, req, fastopen_sk); sk->sk_data_ready(sk); bh_unlock_sock(fastopen_sk); sock_put(fastopen_sk); } else { tcp_rsk(req)->tfo_listener = false; if (!want_cookie) inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT); af_ops->send_synack(sk, dst, &fl, req, &foc, !want_cookie); if (want_cookie) goto drop_and_free; } reqsk_put(req); return 0; drop_and_release: dst_release(dst); drop_and_free: reqsk_free(req); drop: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return 0; }
0
Chrome
befb46ae3385fa13975521e9a2281e35805b339e
NOT_APPLICABLE
NOT_APPLICABLE
const ResourceRequest& FrameLoader::originalRequest() const { return activeDocumentLoader()->originalRequestCopy(); }
0
cyrus-imapd
602f12ed2af0a49ac4a58affbfea57d0fc23dea5
NOT_APPLICABLE
NOT_APPLICABLE
EXPORTED int end_resp_headers(struct transaction_t *txn, long code) { int r = 0; if (txn->flags.ver == VER_2) { r = http2_end_headers(txn, code); } else { /* CRLF terminating the header block */ prot_puts(txn->conn->pout, "\r\n"); } return r; }
0
Chrome
2706470a422dec8f4ae2538e80f0e7e3c4f4f7f6
NOT_APPLICABLE
NOT_APPLICABLE
void PaymentRequest::HideIfNecessary() { display_handle_.reset(); }
0
nbd
3ef52043861ab16352d49af89e048ba6339d6df8
NOT_APPLICABLE
NOT_APPLICABLE
int copyonwrite_prepare(CLIENT* client) { off_t i; if ((client->difffilename = malloc(1024))==NULL) err("Failed to allocate string for diff file name"); snprintf(client->difffilename, 1024, "%s-%s-%d.diff",client->exportname,client->clientname, (int)getpid()) ; client->difffilename[1023]='\0'; msg3(LOG_INFO,"About to create map and diff file %s",client->difffilename) ; client->difffile=open(client->difffilename,O_RDWR | O_CREAT | O_TRUNC,0600) ; if (client->difffile<0) err("Could not create diff file (%m)") ; if ((client->difmap=calloc(client->exportsize/DIFFPAGESIZE,sizeof(u32)))==NULL) err("Could not allocate memory") ; for (i=0;i<client->exportsize/DIFFPAGESIZE;i++) client->difmap[i]=(u32)-1 ; return 0; }
0
Chrome
a4150b688a754d3d10d2ca385155b1c95d77d6ae
NOT_APPLICABLE
NOT_APPLICABLE
error::Error GLES2DecoderPassthroughImpl::DoMultiDrawEndCHROMIUM() { MultiDrawManager::ResultData result; if (!multi_draw_manager_->End(&result)) { return error::kInvalidArguments; } switch (result.draw_function) { case MultiDrawManager::DrawFunction::DrawArrays: api()->glMultiDrawArraysANGLEFn(result.mode, result.firsts.data(), result.counts.data(), result.drawcount); return error::kNoError; case MultiDrawManager::DrawFunction::DrawArraysInstanced: api()->glMultiDrawArraysInstancedANGLEFn( result.mode, result.firsts.data(), result.counts.data(), result.instance_counts.data(), result.drawcount); return error::kNoError; case MultiDrawManager::DrawFunction::DrawElements: api()->glMultiDrawElementsANGLEFn(result.mode, result.counts.data(), result.type, result.indices.data(), result.drawcount); return error::kNoError; case MultiDrawManager::DrawFunction::DrawElementsInstanced: api()->glMultiDrawElementsInstancedANGLEFn( result.mode, result.counts.data(), result.type, result.indices.data(), result.instance_counts.data(), result.drawcount); return error::kNoError; default: NOTREACHED(); return error::kLostContext; } }
0
FreeRDP
602f4a2e14b41703b5f431de3154cd46a5750a2d
NOT_APPLICABLE
NOT_APPLICABLE
static void zgfx_history_buffer_ring_read(ZGFX_CONTEXT* zgfx, int offset, BYTE* dst, UINT32 count) { UINT32 front; UINT32 index; UINT32 bytes; UINT32 valid; UINT32 bytesLeft; BYTE* dptr = dst; BYTE* origDst = dst; if (count <= 0) return; bytesLeft = count; index = (zgfx->HistoryIndex + zgfx->HistoryBufferSize - offset) % zgfx->HistoryBufferSize; bytes = MIN(bytesLeft, offset); if ((index + bytes) <= zgfx->HistoryBufferSize) { CopyMemory(dptr, &(zgfx->HistoryBuffer[index]), bytes); } else { front = zgfx->HistoryBufferSize - index; CopyMemory(dptr, &(zgfx->HistoryBuffer[index]), front); CopyMemory(&dptr[front], zgfx->HistoryBuffer, bytes - front); } if ((bytesLeft -= bytes) == 0) return; dptr += bytes; valid = bytes; do { bytes = valid; if (bytes > bytesLeft) bytes = bytesLeft; CopyMemory(dptr, origDst, bytes); dptr += bytes; valid <<= 1; } while ((bytesLeft -= bytes) > 0); }
0
linux
32452a3eb8b64e01e2be717f518c0be046975b9d
NOT_APPLICABLE
NOT_APPLICABLE
static void io_wq_submit_work(struct io_wq_work *work) { struct io_kiocb *req = container_of(work, struct io_kiocb, work); const struct io_op_def *def = &io_op_defs[req->opcode]; unsigned int issue_flags = IO_URING_F_UNLOCKED; bool needs_poll = false; struct io_kiocb *timeout; int ret = 0, err = -ECANCELED; /* one will be dropped by ->io_free_work() after returning to io-wq */ if (!(req->flags & REQ_F_REFCOUNT)) __io_req_set_refcount(req, 2); else req_ref_get(req); timeout = io_prep_linked_timeout(req); if (timeout) io_queue_linked_timeout(timeout); /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ if (work->flags & IO_WQ_WORK_CANCEL) { fail: io_req_task_queue_fail(req, err); return; } if (!io_assign_file(req, issue_flags)) { err = -EBADF; work->flags |= IO_WQ_WORK_CANCEL; goto fail; } if (req->flags & REQ_F_FORCE_ASYNC) { bool opcode_poll = def->pollin || def->pollout; if (opcode_poll && file_can_poll(req->file)) { needs_poll = true; issue_flags |= IO_URING_F_NONBLOCK; } } do { ret = io_issue_sqe(req, issue_flags); if (ret != -EAGAIN) break; /* * We can get EAGAIN for iopolled IO even though we're * forcing a sync submission from here, since we can't * wait for request slots on the block side. */ if (!needs_poll) { cond_resched(); continue; } if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK) return; /* aborted or ready, in either case retry blocking */ needs_poll = false; issue_flags &= ~IO_URING_F_NONBLOCK; } while (1); /* avoid locking problems by failing it from a clean context */ if (ret) io_req_task_queue_fail(req, ret); }
0
linux
9b3e617f3df53822345a8573b6d358f6b9e5ed87
NOT_APPLICABLE
NOT_APPLICABLE
static void __vcc_insert_socket(struct sock *sk) { struct atm_vcc *vcc = atm_sk(sk); struct hlist_head *head = &vcc_hash[vcc->vci & (VCC_HTABLE_SIZE - 1)]; sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1); sk_add_node(sk, head); }
0
unicorn
c733bbada356b0373fa8aa72c044574bb855fd24
NOT_APPLICABLE
NOT_APPLICABLE
static inline void page_unlock__debug(const PageDesc *pd) { }
0
curl
curl-7_51_0-162-g3ab3c16
NOT_APPLICABLE
NOT_APPLICABLE
static int storebuffer(int output, FILE *data) { char **buffer = (char **)data; unsigned char outc = (unsigned char)output; **buffer = outc; (*buffer)++; return outc; /* act like fputc() ! */ }
0
sleuthkit
459ae818fc8dae717549810150de4d191ce158f1
NOT_APPLICABLE
NOT_APPLICABLE
static uint8_t yaffs_is_version_allocated(YAFFSFS_INFO * yfs, TSK_INUM_T inode){ YaffsCacheObject * obj; YaffsCacheVersion * version; YaffsCacheChunk * curr; TSK_RETVAL_ENUM result = yaffscache_version_find_by_inode(yfs, inode, &version, &obj); if (result != TSK_OK) { if (tsk_verbose) tsk_fprintf(stderr, "yaffs_is_version_allocated: yaffscache_version_find_by_inode failed! (inode: %d)\n", inode); return 0; } if(obj->yco_latest == version){ curr = obj->yco_latest->ycv_header_chunk; while(curr != NULL){ // We're looking for a newer unlinked or deleted header. If one exists, then this object should be considered unallocated if((curr->ycc_parent_id == YAFFS_OBJECT_UNLINKED) || (curr->ycc_parent_id == YAFFS_OBJECT_DELETED)){ return 0; } curr = curr ->ycc_next; } return 1; } else{ return 0; } }
0
Chrome
a8e17a3031b6ad69c399e5e04dd0084e577097fc
NOT_APPLICABLE
NOT_APPLICABLE
void HTMLFormControlElement::didRecalcStyle(StyleRecalcChange) { if (LayoutObject* layoutObject = this->layoutObject()) layoutObject->updateFromElement(); }
0
linux
f3d3342602f8bcbf37d7c46641cb9bca7618eb1c
NOT_APPLICABLE
NOT_APPLICABLE
static unsigned int unix_skb_len(const struct sk_buff *skb) { return skb->len - UNIXCB(skb).consumed; }
0
Chrome
108147dfd1ea159fd3632ef92ccc4ab8952980c7
NOT_APPLICABLE
NOT_APPLICABLE
void ContentSecurityPolicy::SetOverrideAllowInlineStyle(bool value) { override_inline_style_allowed_ = value; }
0
linux
2811ebac2521ceac84f2bdae402455baa6a7fb47
NOT_APPLICABLE
NOT_APPLICABLE
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, gfp_t gfp) { return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; }
0
mariadb-connector-c
2759b87d72926b7c9b5426437a7c8dd15ff57945
NOT_APPLICABLE
NOT_APPLICABLE
mysql_debug(const char *debug __attribute__((unused))) { return; }
0
linux
5b9fbeb75b6a98955f628e205ac26689bcb1383e
NOT_APPLICABLE
NOT_APPLICABLE
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u32 dst = insn->dst_reg, src = insn->src_reg; u8 opcode = BPF_OP(insn->code); int ret; dst_reg = &regs[dst]; if ((known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(env, dst_reg); return 0; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ if (opcode == BPF_SUB && env->allow_ptr_leaks) { __mark_reg_unknown(env, dst_reg); return 0; } verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } switch (ptr_reg->type) { case PTR_TO_MAP_VALUE_OR_NULL: verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; case CONST_PTR_TO_MAP: case PTR_TO_PACKET_END: case PTR_TO_SOCKET: case PTR_TO_SOCKET_OR_NULL: case PTR_TO_SOCK_COMMON: case PTR_TO_SOCK_COMMON_OR_NULL: case PTR_TO_TCP_SOCK: case PTR_TO_TCP_SOCK_OR_NULL: case PTR_TO_XDP_SOCK: verbose(env, "R%d pointer arithmetic on %s prohibited\n", dst, reg_type_str[ptr_reg->type]); return -EACCES; case PTR_TO_MAP_VALUE: if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", off_reg == dst_reg ? dst : src); return -EACCES; } fallthrough; default: break; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. */ dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) return -EINVAL; /* pointer types do not carry 32-bit bounds at the moment. */ __mark_reg32_unbounded(dst_reg); switch (opcode) { case BPF_ADD: ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); if (ret < 0) { verbose(env, "R%d tried to add from different maps or paths\n", dst); return ret; } /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ if (known && (ptr_reg->off + smin_val == (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive * integer value was added to the pointer, give it a new 'id' * if it's a PTR_TO_PACKET. * this creates a new 'base' pointer, off_reg (variable) gets * added into the variable offset, and we copy the fixed offset * from ptr_reg. */ if (signed_add_overflows(smin_ptr, smin_val) || signed_add_overflows(smax_ptr, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr + smin_val; dst_reg->smax_value = smax_ptr + smax_val; } if (umin_ptr + umin_val < umin_ptr || umax_ptr + umax_val < umax_ptr) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value = umin_ptr + umin_val; dst_reg->umax_value = umax_ptr + umax_val; } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; dst_reg->raw = ptr_reg->raw; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ dst_reg->raw = 0; } break; case BPF_SUB: ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); if (ret < 0) { verbose(env, "R%d tried to sub from different maps or paths\n", dst); return ret; } if (dst_reg == off_reg) { /* scalar -= pointer. Creates an unknown scalar */ verbose(env, "R%d tried to subtract pointer from scalar\n", dst); return -EACCES; } /* We don't allow subtraction from FP, because (according to * test_verifier.c test "invalid fp arithmetic", JITs might not * be able to deal with it. */ if (ptr_reg->type == PTR_TO_STACK) { verbose(env, "R%d subtraction from stack pointer prohibited\n", dst); return -EACCES; } if (known && (ptr_reg->off - smin_val == (s64)(s32)(ptr_reg->off - smin_val))) { /* pointer -= K. Subtract it from fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; dst_reg->off = ptr_reg->off - smin_val; dst_reg->raw = ptr_reg->raw; break; } /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. */ if (signed_sub_overflows(smin_ptr, smax_val) || signed_sub_overflows(smax_ptr, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr - smax_val; dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value = umin_ptr - umax_val; dst_reg->umax_value = umax_ptr - umin_val; } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; dst_reg->raw = ptr_reg->raw; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) dst_reg->raw = 0; } break; case BPF_AND: case BPF_OR: case BPF_XOR: /* bitwise ops on pointers are troublesome, prohibit. */ verbose(env, "R%d bitwise operator %s on pointer prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; default: /* other operators (e.g. MUL,LSH) produce non-pointer results */ verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; } if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) return -EINVAL; __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); /* For unprivileged we require that resulting offset must be in bounds * in order to be able to sanitize access later on. */ if (!env->bypass_spec_v1) { if (dst_reg->type == PTR_TO_MAP_VALUE && check_map_access(env, dst, dst_reg->off, 1, false)) { verbose(env, "R%d pointer arithmetic of map value goes out of range, " "prohibited for !root\n", dst); return -EACCES; } else if (dst_reg->type == PTR_TO_STACK && check_stack_access(env, dst_reg, dst_reg->off + dst_reg->var_off.value, 1)) { verbose(env, "R%d stack pointer arithmetic goes out of range, " "prohibited for !root\n", dst); return -EACCES; } } return 0; }
0
fmt
8cf30aa2be256eba07bb1cefb998c52326e846e7
NOT_APPLICABLE
NOT_APPLICABLE
NamedArg(BasicStringRef<Char> argname, const T &value) : Arg(MakeArg< BasicFormatter<Char> >(value)), name(argname) {}
0
krb5
524688ce87a15fc75f87efc8c039ba4c7d5c197b
NOT_APPLICABLE
NOT_APPLICABLE
spnego_gss_export_sec_context( OM_uint32 *minor_status, gss_ctx_id_t *context_handle, gss_buffer_t interprocess_token) { OM_uint32 ret; ret = gss_export_sec_context(minor_status, context_handle, interprocess_token); return (ret); }
0
linux
f5364c150aa645b3d7daa21b5c0b9feaa1c9cd6d
NOT_APPLICABLE
NOT_APPLICABLE
static inline struct task_group *css_tg(struct cgroup_subsys_state *css) { return css ? container_of(css, struct task_group, css) : NULL; }
0
gdm
4e6e5335d29c039bed820c43bfd1c19cb62539ff
NOT_APPLICABLE
NOT_APPLICABLE
gdm_display_set_windowpath (GdmDisplay *self) { GdmDisplayPrivate *priv; /* setting WINDOWPATH for clients */ xcb_intern_atom_cookie_t atom_cookie; xcb_intern_atom_reply_t *atom_reply = NULL; xcb_get_property_cookie_t get_property_cookie; xcb_get_property_reply_t *get_property_reply = NULL; xcb_window_t root_window = XCB_WINDOW_NONE; const char *windowpath; char *newwindowpath; uint32_t num; char nums[10]; int numn; priv = gdm_display_get_instance_private (self); atom_cookie = xcb_intern_atom (priv->xcb_connection, 0, strlen("XFree86_VT"), "XFree86_VT"); atom_reply = xcb_intern_atom_reply (priv->xcb_connection, atom_cookie, NULL); if (atom_reply == NULL) { g_debug ("no XFree86_VT atom\n"); goto out; } root_window = get_root_window (priv->xcb_connection, priv->xcb_screen_number); if (root_window == XCB_WINDOW_NONE) { g_debug ("couldn't find root window\n"); goto out; } get_property_cookie = xcb_get_property (priv->xcb_connection, FALSE, root_window, atom_reply->atom, XCB_ATOM_INTEGER, 0, 1); get_property_reply = xcb_get_property_reply (priv->xcb_connection, get_property_cookie, NULL); if (get_property_reply == NULL) { g_debug ("no XFree86_VT property\n"); goto out; } num = ((uint32_t *) xcb_get_property_value (get_property_reply))[0]; windowpath = getenv ("WINDOWPATH"); numn = snprintf (nums, sizeof (nums), "%u", num); if (!windowpath) { newwindowpath = malloc (numn + 1); sprintf (newwindowpath, "%s", nums); } else { newwindowpath = malloc (strlen (windowpath) + 1 + numn + 1); sprintf (newwindowpath, "%s:%s", windowpath, nums); } g_setenv ("WINDOWPATH", newwindowpath, TRUE); out: g_clear_pointer (&atom_reply, free); g_clear_pointer (&get_property_reply, free); }
0
Chrome
04aaacb936a08d70862d6d9d7e8354721ae46be8
CVE-2019-5837
CWE-200
void AppCacheDatabase::ReadEntryRecord( const sql::Statement& statement, EntryRecord* record) { record->cache_id = statement.ColumnInt64(0); record->url = GURL(statement.ColumnString(1)); record->flags = statement.ColumnInt(2); record->response_id = statement.ColumnInt64(3); record->response_size = statement.ColumnInt64(4); }
1
php
4435b9142ff9813845d5c97ab29a5d637bedb257
NOT_APPLICABLE
NOT_APPLICABLE
static PHP_FUNCTION(xmlwriter_end_comment) { php_xmlwriter_end(INTERNAL_FUNCTION_PARAM_PASSTHRU, xmlTextWriterEndComment); }
0
vim
0971c7a4e537ea120a6bb2195960be8d0815e97b
CVE-2022-2207
CWE-787
ins_bs( int c, int mode, int *inserted_space_p) { linenr_T lnum; int cc; int temp = 0; // init for GCC colnr_T save_col; colnr_T mincol; int did_backspace = FALSE; int in_indent; int oldState; int cpc[MAX_MCO]; // composing characters int call_fix_indent = FALSE; /* * can't delete anything in an empty file * can't backup past first character in buffer * can't backup past starting point unless 'backspace' > 1 * can backup to a previous line if 'backspace' == 0 */ if ( BUFEMPTY() || ( #ifdef FEAT_RIGHTLEFT !revins_on && #endif ((curwin->w_cursor.lnum == 1 && curwin->w_cursor.col == 0) || (!can_bs(BS_START) && ((arrow_used #ifdef FEAT_JOB_CHANNEL && !bt_prompt(curbuf) #endif ) || (curwin->w_cursor.lnum == Insstart_orig.lnum && curwin->w_cursor.col <= Insstart_orig.col))) || (!can_bs(BS_INDENT) && !arrow_used && ai_col > 0 && curwin->w_cursor.col <= ai_col) || (!can_bs(BS_EOL) && curwin->w_cursor.col == 0)))) { vim_beep(BO_BS); return FALSE; } if (stop_arrow() == FAIL) return FALSE; in_indent = inindent(0); if (in_indent) can_cindent = FALSE; end_comment_pending = NUL; // After BS, don't auto-end comment #ifdef FEAT_RIGHTLEFT if (revins_on) // put cursor after last inserted char inc_cursor(); #endif // Virtualedit: // BACKSPACE_CHAR eats a virtual space // BACKSPACE_WORD eats all coladd // BACKSPACE_LINE eats all coladd and keeps going if (curwin->w_cursor.coladd > 0) { if (mode == BACKSPACE_CHAR) { --curwin->w_cursor.coladd; return TRUE; } if (mode == BACKSPACE_WORD) { curwin->w_cursor.coladd = 0; return TRUE; } curwin->w_cursor.coladd = 0; } /* * Delete newline! */ if (curwin->w_cursor.col == 0) { lnum = Insstart.lnum; if (curwin->w_cursor.lnum == lnum #ifdef FEAT_RIGHTLEFT || revins_on #endif ) { if (u_save((linenr_T)(curwin->w_cursor.lnum - 2), (linenr_T)(curwin->w_cursor.lnum + 1)) == FAIL) return FALSE; --Insstart.lnum; Insstart.col = (colnr_T)STRLEN(ml_get(Insstart.lnum)); } /* * In replace mode: * cc < 0: NL was inserted, delete it * cc >= 0: NL was replaced, put original characters back */ cc = -1; if (State & REPLACE_FLAG) cc = replace_pop(); // returns -1 if NL was inserted /* * In replace mode, in the line we started replacing, we only move the * cursor. */ if ((State & REPLACE_FLAG) && curwin->w_cursor.lnum <= lnum) { dec_cursor(); } else { if (!(State & VREPLACE_FLAG) || curwin->w_cursor.lnum > orig_line_count) { temp = gchar_cursor(); // remember current char --curwin->w_cursor.lnum; // When "aw" is in 'formatoptions' we must delete the space at // the end of the line, otherwise the line will be broken // again when auto-formatting. if (has_format_option(FO_AUTO) && has_format_option(FO_WHITE_PAR)) { char_u *ptr = ml_get_buf(curbuf, curwin->w_cursor.lnum, TRUE); int len; len = (int)STRLEN(ptr); if (len > 0 && ptr[len - 1] == ' ') ptr[len - 1] = NUL; } (void)do_join(2, FALSE, FALSE, FALSE, FALSE); if (temp == NUL && gchar_cursor() != NUL) inc_cursor(); } else dec_cursor(); /* * In MODE_REPLACE mode we have to put back the text that was * replaced by the NL. On the replace stack is first a * NUL-terminated sequence of characters that were deleted and then * the characters that NL replaced. */ if (State & REPLACE_FLAG) { /* * Do the next ins_char() in MODE_NORMAL state, to * prevent ins_char() from replacing characters and * avoiding showmatch(). */ oldState = State; State = MODE_NORMAL; /* * restore characters (blanks) deleted after cursor */ while (cc > 0) { save_col = curwin->w_cursor.col; mb_replace_pop_ins(cc); curwin->w_cursor.col = save_col; cc = replace_pop(); } // restore the characters that NL replaced replace_pop_ins(); State = oldState; } } did_ai = FALSE; } else { /* * Delete character(s) before the cursor. */ #ifdef FEAT_RIGHTLEFT if (revins_on) // put cursor on last inserted char dec_cursor(); #endif mincol = 0; // keep indent if (mode == BACKSPACE_LINE && (curbuf->b_p_ai || cindent_on()) #ifdef FEAT_RIGHTLEFT && !revins_on #endif ) { save_col = curwin->w_cursor.col; beginline(BL_WHITE); if (curwin->w_cursor.col < save_col) { mincol = curwin->w_cursor.col; // should now fix the indent to match with the previous line call_fix_indent = TRUE; } curwin->w_cursor.col = save_col; } /* * Handle deleting one 'shiftwidth' or 'softtabstop'. */ if ( mode == BACKSPACE_CHAR && ((p_sta && in_indent) || ((get_sts_value() != 0 #ifdef FEAT_VARTABS || tabstop_count(curbuf->b_p_vsts_array) #endif ) && curwin->w_cursor.col > 0 && (*(ml_get_cursor() - 1) == TAB || (*(ml_get_cursor() - 1) == ' ' && (!*inserted_space_p || arrow_used)))))) { int ts; colnr_T vcol; colnr_T want_vcol; colnr_T start_vcol; *inserted_space_p = FALSE; // Compute the virtual column where we want to be. Since // 'showbreak' may get in the way, need to get the last column of // the previous character. getvcol(curwin, &curwin->w_cursor, &vcol, NULL, NULL); start_vcol = vcol; dec_cursor(); getvcol(curwin, &curwin->w_cursor, NULL, NULL, &want_vcol); inc_cursor(); #ifdef FEAT_VARTABS if (p_sta && in_indent) { ts = (int)get_sw_value(curbuf); want_vcol = (want_vcol / ts) * ts; } else want_vcol = tabstop_start(want_vcol, get_sts_value(), curbuf->b_p_vsts_array); #else if (p_sta && in_indent) ts = (int)get_sw_value(curbuf); else ts = (int)get_sts_value(); want_vcol = (want_vcol / ts) * ts; #endif // delete characters until we are at or before want_vcol while (vcol > want_vcol && (cc = *(ml_get_cursor() - 1), VIM_ISWHITE(cc))) ins_bs_one(&vcol); // insert extra spaces until we are at want_vcol while (vcol < want_vcol) { // Remember the first char we inserted if (curwin->w_cursor.lnum == Insstart_orig.lnum && curwin->w_cursor.col < Insstart_orig.col) Insstart_orig.col = curwin->w_cursor.col; if (State & VREPLACE_FLAG) ins_char(' '); else { ins_str((char_u *)" "); if ((State & REPLACE_FLAG)) replace_push(NUL); } getvcol(curwin, &curwin->w_cursor, &vcol, NULL, NULL); } // If we are now back where we started delete one character. Can // happen when using 'sts' and 'linebreak'. if (vcol >= start_vcol) ins_bs_one(&vcol); } /* * Delete up to starting point, start of line or previous word. */ else { int cclass = 0, prev_cclass = 0; if (has_mbyte) cclass = mb_get_class(ml_get_cursor()); do { #ifdef FEAT_RIGHTLEFT if (!revins_on) // put cursor on char to be deleted #endif dec_cursor(); cc = gchar_cursor(); // look multi-byte character class if (has_mbyte) { prev_cclass = cclass; cclass = mb_get_class(ml_get_cursor()); } // start of word? if (mode == BACKSPACE_WORD && !vim_isspace(cc)) { mode = BACKSPACE_WORD_NOT_SPACE; temp = vim_iswordc(cc); } // end of word? else if (mode == BACKSPACE_WORD_NOT_SPACE && ((vim_isspace(cc) || vim_iswordc(cc) != temp) || prev_cclass != cclass)) { #ifdef FEAT_RIGHTLEFT if (!revins_on) #endif inc_cursor(); #ifdef FEAT_RIGHTLEFT else if (State & REPLACE_FLAG) dec_cursor(); #endif break; } if (State & REPLACE_FLAG) replace_do_bs(-1); else { if (enc_utf8 && p_deco) (void)utfc_ptr2char(ml_get_cursor(), cpc); (void)del_char(FALSE); /* * If there are combining characters and 'delcombine' is set * move the cursor back. Don't back up before the base * character. */ if (enc_utf8 && p_deco && cpc[0] != NUL) inc_cursor(); #ifdef FEAT_RIGHTLEFT if (revins_chars) { revins_chars--; revins_legal++; } if (revins_on && gchar_cursor() == NUL) break; #endif } // Just a single backspace?: if (mode == BACKSPACE_CHAR) break; } while ( #ifdef FEAT_RIGHTLEFT revins_on || #endif (curwin->w_cursor.col > mincol && (can_bs(BS_NOSTOP) || (curwin->w_cursor.lnum != Insstart_orig.lnum || curwin->w_cursor.col != Insstart_orig.col) ))); } did_backspace = TRUE; } did_si = FALSE; can_si = FALSE; can_si_back = FALSE; if (curwin->w_cursor.col <= 1) did_ai = FALSE; if (call_fix_indent) fix_indent(); /* * It's a little strange to put backspaces into the redo * buffer, but it makes auto-indent a lot easier to deal * with. */ AppendCharToRedobuff(c); // If deleted before the insertion point, adjust it if (curwin->w_cursor.lnum == Insstart_orig.lnum && curwin->w_cursor.col < Insstart_orig.col) Insstart_orig.col = curwin->w_cursor.col; // vi behaviour: the cursor moves backward but the character that // was there remains visible // Vim behaviour: the cursor moves backward and the character that // was there is erased from the screen. // We can emulate the vi behaviour by pretending there is a dollar // displayed even when there isn't. // --pkv Sun Jan 19 01:56:40 EST 2003 if (vim_strchr(p_cpo, CPO_BACKSPACE) != NULL && dollar_vcol == -1) dollar_vcol = curwin->w_virtcol; #ifdef FEAT_FOLDING // When deleting a char the cursor line must never be in a closed fold. // E.g., when 'foldmethod' is indent and deleting the first non-white // char before a Tab. if (did_backspace) foldOpenCursor(); #endif return did_backspace; }
1
vim
a6f9e300161f4cb54713da22f65b261595e8e614
NOT_APPLICABLE
NOT_APPLICABLE
pum_wanted(void) { // 'completeopt' must contain "menu" or "menuone" if (vim_strchr(p_cot, 'm') == NULL) return FALSE; // The display looks bad on a B&W display. if (t_colors < 8 #ifdef FEAT_GUI && !gui.in_use #endif ) return FALSE; return TRUE; }
0
ghostscript
c501a58f8d5650c8ba21d447c0d6f07eafcb0f15
NOT_APPLICABLE
NOT_APPLICABLE
static void Ins_SRP0( INS_ARG ) { CUR.GS.rp0 = (Int)(args[0]); }
0
exiv2
c72d16f4c402a8acc2dfe06fe3d58bf6cf99069e
NOT_APPLICABLE
NOT_APPLICABLE
uint32_t fillGap(Exiv2::Internal::IoWrapper& ioWrapper, uint32_t curr, uint32_t tobe) { if (curr < tobe) { Exiv2::DataBuf buf(tobe - curr); memset(buf.pData_, 0x0, buf.size_); ioWrapper.write(buf.pData_, buf.size_); return tobe - curr; } return 0; } // fillGap
0
Chrome
a8d6ae61d266d8bc44c3dd2d08bda32db701e359
NOT_APPLICABLE
NOT_APPLICABLE
bool DownloadItemImpl::GetAutoOpened() { return auto_opened_; }
0
Chrome
6b96dd532af164a73f2aac757bafff58211aca2c
NOT_APPLICABLE
NOT_APPLICABLE
jboolean WebContentsAndroid::IsShowingInterstitialPage(JNIEnv* env, jobject obj) { return web_contents_->ShowingInterstitialPage(); }
0
linux
5d26a105b5a73e5635eae0629b42fa0a90e07b7b
NOT_APPLICABLE
NOT_APPLICABLE
static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx, int do_cont_test) { unsigned char *ptr = buf; unsigned int byte_count = (unsigned int)nbytes; int err; spin_lock_bh(&ctx->prng_lock); err = -EINVAL; if (ctx->flags & PRNG_NEED_RESET) goto done; /* * If the FIXED_SIZE flag is on, only return whole blocks of * pseudo random data */ err = -EINVAL; if (ctx->flags & PRNG_FIXED_SIZE) { if (nbytes < DEFAULT_BLK_SZ) goto done; byte_count = DEFAULT_BLK_SZ; } err = byte_count; dbgprint(KERN_CRIT "getting %d random bytes for context %p\n", byte_count, ctx); remainder: if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { memset(buf, 0, nbytes); err = -EINVAL; goto done; } } /* * Copy any data less than an entire block */ if (byte_count < DEFAULT_BLK_SZ) { empty_rbuf: while (ctx->rand_data_valid < DEFAULT_BLK_SZ) { *ptr = ctx->rand_data[ctx->rand_data_valid]; ptr++; byte_count--; ctx->rand_data_valid++; if (byte_count == 0) goto done; } } /* * Now copy whole blocks */ for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) { if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { memset(buf, 0, nbytes); err = -EINVAL; goto done; } } if (ctx->rand_data_valid > 0) goto empty_rbuf; memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ); ctx->rand_data_valid += DEFAULT_BLK_SZ; ptr += DEFAULT_BLK_SZ; } /* * Now go back and get any remaining partial block */ if (byte_count) goto remainder; done: spin_unlock_bh(&ctx->prng_lock); dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", err, ctx); return err; }
0
php-src
c395c6e5d7e8df37a21265ff76e48fe75ceb5ae6
NOT_APPLICABLE
NOT_APPLICABLE
static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc) { int i, l, x1, x2, dy; int oc; /* old pixel value */ int wx2,wy2; /* stack of filled segments */ struct seg *stack; struct seg *sp; char **pts; if (!im->tile) { return; } wx2=im->sx;wy2=im->sy; nc = gdImageTileGet(im,x,y); pts = (char **) ecalloc(im->sy + 1, sizeof(char *)); for (i = 0; i < im->sy + 1; i++) { pts[i] = (char *) ecalloc(im->sx + 1, sizeof(char)); } stack = (struct seg *)safe_emalloc(sizeof(struct seg), ((int)(im->sy*im->sx)/4), 1); sp = stack; oc = gdImageGetPixel(im, x, y); /* required! */ FILL_PUSH(y,x,x,1); /* seed segment (popped 1st) */ FILL_PUSH(y+1, x, x, -1); while (sp>stack) { FILL_POP(y, x1, x2, dy); for (x=x1; x>=0 && (!pts[y][x] && gdImageGetPixel(im,x,y)==oc); x--) { nc = gdImageTileGet(im,x,y); pts[y][x] = 1; gdImageSetPixel(im,x, y, nc); } if (x>=x1) { goto skip; } l = x+1; /* leak on left? */ if (l<x1) { FILL_PUSH(y, l, x1-1, -dy); } x = x1+1; do { for(; x<wx2 && (!pts[y][x] && gdImageGetPixel(im,x, y)==oc); x++) { nc = gdImageTileGet(im,x,y); pts[y][x] = 1; gdImageSetPixel(im, x, y, nc); } FILL_PUSH(y, l, x-1, dy); /* leak on right? */ if (x>x2+1) { FILL_PUSH(y, x2+1, x-1, -dy); } skip: for(x++; x<=x2 && (pts[y][x] || gdImageGetPixel(im,x, y)!=oc); x++); l = x; } while (x<=x2); } for(i = 0; i < im->sy + 1; i++) { efree(pts[i]); } efree(pts); efree(stack); }
0
Chrome
fa76a9f7ef6a028f83f97c181b150ecfd2b13be1
CVE-2018-16085
CWE-416
void CoordinatorImpl::GetVmRegionsForHeapProfiler( const std::vector<base::ProcessId>& pids, GetVmRegionsForHeapProfilerCallback callback) { DCHECK_CALLED_ON_VALID_THREAD(thread_checker_); uint64_t dump_guid = ++next_dump_id_; std::unique_ptr<QueuedVmRegionRequest> request = std::make_unique<QueuedVmRegionRequest>(dump_guid, std::move(callback)); in_progress_vm_region_requests_[dump_guid] = std::move(request); std::vector<QueuedRequestDispatcher::ClientInfo> clients; for (const auto& kv : clients_) { auto client_identity = kv.second->identity; const base::ProcessId pid = GetProcessIdForClientIdentity(client_identity); clients.emplace_back(kv.second->client.get(), pid, kv.second->process_type); } QueuedVmRegionRequest* request_ptr = in_progress_vm_region_requests_[dump_guid].get(); auto os_callback = base::BindRepeating(&CoordinatorImpl::OnOSMemoryDumpForVMRegions, base::Unretained(this), dump_guid); QueuedRequestDispatcher::SetUpAndDispatchVmRegionRequest(request_ptr, clients, pids, os_callback); FinalizeVmRegionDumpIfAllManagersReplied(dump_guid); }
1
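A hedged sketch of the CWE-416 pattern in the Chrome record above: base::BindRepeating(..., base::Unretained(this)) hands the OS-dump callback a raw pointer that can outlive CoordinatorImpl, so a late reply touches freed memory. The upstream fix binds a weak pointer instead; the self-contained standard-C++ sketch below shows the same remedy with std::weak_ptr. The Coordinator class and OnOsDump method are invented for illustration and are not Chromium code.

#include <functional>
#include <iostream>
#include <memory>

// Illustration only: the callback holds a weak reference to its owner and
// degrades to a no-op once the owner is gone, instead of dereferencing a
// dangling pointer the way the Unretained(this) binding in the record allows.
class Coordinator : public std::enable_shared_from_this<Coordinator> {
 public:
  std::function<void()> MakeDumpCallback() {
    std::weak_ptr<Coordinator> weak = weak_from_this();
    return [weak] {
      if (auto self = weak.lock())  // owner still alive?
        self->OnOsDump();
      // otherwise: drop the reply silently -- no use-after-free
    };
  }

 private:
  void OnOsDump() { std::cout << "vm-region dump handled\n"; }
};

int main() {
  std::function<void()> cb;
  {
    auto coordinator = std::make_shared<Coordinator>();
    cb = coordinator->MakeDumpCallback();
    cb();  // runs normally while the owner exists
  }        // coordinator destroyed here; the weak pointer expires
  cb();    // safe no-op; a raw-pointer binding would touch freed memory
  return 0;
}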
cantata
afc4f8315d3e96574925fb530a7004cc9e6ce3d3
NOT_APPLICABLE
NOT_APPLICABLE
qint64 RemoteFsDevice::freeSpace() { if (!isConnected()) { // || !details.isLocalFile()) { return 0; } spaceInfo.setPath(mountPoint(details, false)); if (isOldSshfs()) { return 0; } return spaceInfo.size()-spaceInfo.used(); }
0
server
807945f2eb5fa22e6f233cc17b85a2e141efe2c8
NOT_APPLICABLE
NOT_APPLICABLE
bool check_vcol_func_processor(void *arg) { context= 0; if (field && (field->unireg_check == Field::NEXT_NUMBER)) { // Auto increment fields are unsupported return mark_unsupported_function(field_name.str, arg, VCOL_FIELD_REF | VCOL_AUTO_INC); } return mark_unsupported_function(field_name.str, arg, VCOL_FIELD_REF); }
0
linux
b4b814fec1a5a849383f7b3886b654a13abbda7d
NOT_APPLICABLE
NOT_APPLICABLE
iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt, struct iwl_fw_dump_ptrs *fw_error_dump) { struct iwl_fw_error_dump_file *dump_file; struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg; struct iwl_fw_error_dump_trigger_desc *dump_trig; u32 sram_len, sram_ofs; const struct iwl_fw_dbg_mem_seg_tlv *fw_mem = fwrt->fw->dbg.mem_tlv; struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; u32 file_len, fifo_len = 0, prph_len = 0, radio_len = 0; u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->dccm2_len; int i; /* SRAM - include stack CCM if driver knows the values for it */ if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX) return NULL; img = &fwrt->fw->img[fwrt->cur_fw_img]; sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; } else { sram_ofs = fwrt->trans->cfg->dccm_offset; sram_len = fwrt->trans->cfg->dccm_len; } /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg); fifo_len += iwl_fw_txf_len(fwrt, mem_cfg); /* Make room for PRPH registers */ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len); if (fwrt->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 && iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) file_len += sizeof(*dump_data) + sizeof(*dump_info); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { size_t hdr_len = sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_mem); /* Dump SRAM only if no mem_tlvs */ if (!fwrt->fw->dbg.n_mem_tlv) ADD_LEN(file_len, sram_len, hdr_len); /* Make room for all mem types that exist */ ADD_LEN(file_len, smem_len, hdr_len); ADD_LEN(file_len, sram2_len, hdr_len); for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) ADD_LEN(file_len, le32_to_cpu(fw_mem[i].len), hdr_len); } /* Make room for fw's virtual image pages, if it exists */ if (iwl_fw_dbg_is_paging_enabled(fwrt)) file_len += fwrt->num_of_paging_blk * (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE); if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { file_len += sizeof(*dump_data) + fwrt->trans->cfg->d3_debug_data_length * 2; } /* If we only want a monitor dump, reset the file length */ if (fwrt->dump.monitor_only) { file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + sizeof(*dump_info) + sizeof(*dump_smem_cfg); } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + fwrt->dump.desc->len; dump_file = vzalloc(file_len); if (!dump_file) return NULL; fw_error_dump->fwrt_ptr = dump_file; dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; dump_info->hw_type = 
cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev)); dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, sizeof(dump_info->fw_human_readable)); strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name, sizeof(dump_info->dev_human_readable) - 1); strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable) - 1); dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs; dump_info->lmac_err_id[0] = cpu_to_le32(fwrt->dump.lmac_err_id[0]); if (fwrt->smem_cfg.num_lmacs > 1) dump_info->lmac_err_id[1] = cpu_to_le32(fwrt->dump.lmac_err_id[1]); dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id); dump_data = iwl_fw_error_next_data(dump_data); } if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) { /* Dump shared memory configuration */ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); dump_smem_cfg = (void *)dump_data->data; dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); dump_smem_cfg->num_txfifo_entries = cpu_to_le32(mem_cfg->num_txfifo_entries); for (i = 0; i < MAX_NUM_LMAC; i++) { int j; u32 *txf_size = mem_cfg->lmac[i].txfifo_size; for (j = 0; j < TX_FIFO_MAX_NUM; j++) dump_smem_cfg->lmac[i].txfifo_size[j] = cpu_to_le32(txf_size[j]); dump_smem_cfg->lmac[i].rxfifo1_size = cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); } dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size); dump_smem_cfg->internal_txfifo_addr = cpu_to_le32(mem_cfg->internal_txfifo_addr); for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { dump_smem_cfg->internal_txfifo_size[i] = cpu_to_le32(mem_cfg->internal_txfifo_size[i]); } dump_data = iwl_fw_error_next_data(dump_data); } /* We only dump the FIFOs if the FW is in error state */ if (fifo_len) { iwl_fw_dump_rxf(fwrt, &dump_data); iwl_fw_dump_txf(fwrt, &dump_data); } if (radio_len) iwl_read_radio_regs(fwrt, &dump_data); if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + fwrt->dump.desc->len); dump_trig = (void *)dump_data->data; memcpy(dump_trig, &fwrt->dump.desc->trig_desc, sizeof(*dump_trig) + fwrt->dump.desc->len); dump_data = iwl_fw_error_next_data(dump_data); } /* In case we only want monitor dump, skip to dump trasport data */ if (fwrt->dump.monitor_only) goto out; if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg.mem_tlv; if (!fwrt->fw->dbg.n_mem_tlv) iwl_fw_dump_mem(fwrt, &dump_data, sram_len, sram_ofs, IWL_FW_ERROR_DUMP_MEM_SRAM); for (i = 0; i < fwrt->fw->dbg.n_mem_tlv; i++) { u32 len = le32_to_cpu(fw_dbg_mem[i].len); u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); iwl_fw_dump_mem(fwrt, &dump_data, len, ofs, le32_to_cpu(fw_dbg_mem[i].data_type)); } iwl_fw_dump_mem(fwrt, &dump_data, smem_len, fwrt->trans->cfg->smem_offset, IWL_FW_ERROR_DUMP_MEM_SMEM); iwl_fw_dump_mem(fwrt, &dump_data, sram2_len, fwrt->trans->cfg->dccm2_offset, IWL_FW_ERROR_DUMP_MEM_SRAM); } if (iwl_fw_dbg_is_d3_debug_enabled(fwrt) && fwrt->dump.d3_debug_data) { u32 addr = fwrt->trans->cfg->d3_debug_data_base_addr; size_t data_size = fwrt->trans->cfg->d3_debug_data_length; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); dump_data->len = cpu_to_le32(data_size * 2); memcpy(dump_data->data, fwrt->dump.d3_debug_data, data_size); kfree(fwrt->dump.d3_debug_data); fwrt->dump.d3_debug_data 
= NULL; iwl_trans_read_mem_bytes(fwrt->trans, addr, dump_data->data + data_size, data_size); dump_data = iwl_fw_error_next_data(dump_data); } /* Dump fw's virtual image */ if (iwl_fw_dbg_is_paging_enabled(fwrt)) iwl_dump_paging(fwrt, &dump_data); if (prph_len) iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph); out: dump_file->file_len = cpu_to_le32(file_len); return dump_file; }
0
linux
f3747379accba8e95d70cec0eae0582c8c182050
NOT_APPLICABLE
NOT_APPLICABLE
static int em_sysexit(struct x86_emulate_ctxt *ctxt) { const struct x86_emulate_ops *ops = ctxt->ops; struct desc_struct cs, ss; u64 msr_data, rcx, rdx; int usermode; u16 cs_sel = 0, ss_sel = 0; /* inject #GP if in real mode or Virtual 8086 mode */ if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86) return emulate_gp(ctxt, 0); setup_syscalls_segments(ctxt, &cs, &ss); if ((ctxt->rex_prefix & 0x8) != 0x0) usermode = X86EMUL_MODE_PROT64; else usermode = X86EMUL_MODE_PROT32; rcx = reg_read(ctxt, VCPU_REGS_RCX); rdx = reg_read(ctxt, VCPU_REGS_RDX); cs.dpl = 3; ss.dpl = 3; ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); switch (usermode) { case X86EMUL_MODE_PROT32: cs_sel = (u16)(msr_data + 16); if ((msr_data & 0xfffc) == 0x0) return emulate_gp(ctxt, 0); ss_sel = (u16)(msr_data + 24); rcx = (u32)rcx; rdx = (u32)rdx; break; case X86EMUL_MODE_PROT64: cs_sel = (u16)(msr_data + 32); if (msr_data == 0x0) return emulate_gp(ctxt, 0); ss_sel = cs_sel + 8; cs.d = 0; cs.l = 1; if (is_noncanonical_address(rcx) || is_noncanonical_address(rdx)) return emulate_gp(ctxt, 0); break; } cs_sel |= SELECTOR_RPL_MASK; ss_sel |= SELECTOR_RPL_MASK; ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ctxt->_eip = rdx; *reg_write(ctxt, VCPU_REGS_RSP) = rcx; return X86EMUL_CONTINUE; }
0
server
2e7891080667c59ac80f788eef4d59d447595772
NOT_APPLICABLE
NOT_APPLICABLE
virtual Item *expr_cache_insert_transformer(THD *thd, uchar *unused) { return this; }
0
Chrome
bf6a6765d44b09c64b8c75d749efb84742a250e7
NOT_APPLICABLE
NOT_APPLICABLE
void PDFiumEngine::PageOffsetUpdated(const pp::Point& page_offset) { page_offset_ = page_offset; }
0
mongo-c-driver
0d9a4d98bfdf4acd2c0138d4aaeb4e2e0934bd84
NOT_APPLICABLE
NOT_APPLICABLE
bson_iter_decimal128 (const bson_iter_t *iter, /* IN */ bson_decimal128_t *dec) /* OUT */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_DECIMAL128) { bson_iter_decimal128_unsafe (iter, dec); return true; } return false; }
0
linux
f6d8bd051c391c1c0458a30b2a7abcd939329259
NOT_APPLICABLE
NOT_APPLICABLE
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport, const u32 rnd, const u32 synq_hsize) { return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1); }
0
Chrome
3bfe67c9c4b45eb713326aae7a67c8f7390dae08
NOT_APPLICABLE
NOT_APPLICABLE
static void compileoptionusedFunc( sqlite3_context *context, int argc, sqlite3_value **argv ){ const char *zOptName; assert( argc==1 ); UNUSED_PARAMETER(argc); /* IMP: R-39564-36305 The sqlite_compileoption_used() SQL ** function is a wrapper around the sqlite3_compileoption_used() C/C++ ** function. */ if( (zOptName = (const char*)sqlite3_value_text(argv[0]))!=0 ){ sqlite3_result_int(context, sqlite3_compileoption_used(zOptName)); } }
0
Chrome
6b5f83842b5edb5d4bd6684b196b3630c6769731
NOT_APPLICABLE
NOT_APPLICABLE
void ExtensionSettingsHandler::MaybeUpdateAfterNotification() { WebContents* contents = web_ui()->web_contents(); if (!ignore_notifications_ && contents && contents->GetRenderViewHost()) HandleRequestExtensionsData(NULL); deleting_rvh_ = NULL; }
0
gpac
8e585e623b1d666b4ef736ed609264639cb27701
NOT_APPLICABLE
NOT_APPLICABLE
GF_Err gf_isom_box_write_header(GF_Box *ptr, GF_BitStream *bs) { u64 start; if (! bs || !ptr) return GF_BAD_PARAM; if (!ptr->size) return GF_ISOM_INVALID_FILE; start = gf_bs_get_position(bs); if (ptr->size > 0xFFFFFFFF) { gf_bs_write_u32(bs, 1); } else { gf_bs_write_u32(bs, (u32) ptr->size); } gf_bs_write_u32(bs, ptr->type); if (ptr->type == GF_ISOM_BOX_TYPE_UUID) { u32 i; char uuid[16]; char strUUID[32]; switch (((GF_UUIDBox*)ptr)->internal_4cc) { case GF_ISOM_BOX_UUID_TENC: memcpy(strUUID, "8974dbce7be74c5184f97148f9882554", 32); break; case GF_ISOM_BOX_UUID_PSEC: memcpy(strUUID, "A2394F525A9B4F14A2446C427C648DF4", 32); break; case GF_ISOM_BOX_UUID_MSSM: memcpy(strUUID, "A5D40B30E81411DDBA2F0800200C9A66", 32); break; case GF_ISOM_BOX_UUID_PSSH: memcpy(strUUID, "D08A4F1810F34A82B6C832D8ABA183D3", 32); break; case GF_ISOM_BOX_UUID_TFXD: memcpy(strUUID, "6D1D9B0542D544E680E2141DAFF757B2", 32); break; default: memset(strUUID, 0, 32); break; } for (i = 0; i < 16; i++) { char t[3]; t[2] = 0; t[0] = strUUID[2*i]; t[1] = strUUID[2*i+1]; uuid[i] = (u8) strtol(t, NULL, 16); } gf_bs_write_data(bs, uuid, 16); } if (ptr->size > 0xFFFFFFFF) gf_bs_write_u64(bs, ptr->size); GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[iso file] Written Box type %s size "LLD" start "LLD"\n", gf_4cc_to_str(ptr->type), LLD_CAST ptr->size, LLD_CAST start)); return GF_OK; }
0
qemu
aab0e2a661b2b6bf7915c0aefe807fb60d6d9d13
NOT_APPLICABLE
NOT_APPLICABLE
static void ati_cursor_draw_line(VGACommonState *vga, uint8_t *d, int scr_y) { ATIVGAState *s = container_of(vga, ATIVGAState, vga); uint32_t srcoff; uint32_t *dp = (uint32_t *)d; int i, j, h; if (!(s->regs.crtc_gen_cntl & CRTC2_CUR_EN) || scr_y < vga->hw_cursor_y || scr_y >= vga->hw_cursor_y + 64 || scr_y > s->regs.crtc_v_total_disp >> 16) { return; } /* FIXME handle cur_hv_offs correctly */ srcoff = s->cursor_offset + (scr_y - vga->hw_cursor_y) * 16; dp = &dp[vga->hw_cursor_x]; h = ((s->regs.crtc_h_total_disp >> 16) + 1) * 8; for (i = 0; i < 8; i++) { uint32_t color; uint8_t abits = vga_read_byte(vga, srcoff + i); uint8_t xbits = vga_read_byte(vga, srcoff + i + 8); for (j = 0; j < 8; j++, abits <<= 1, xbits <<= 1) { if (abits & BIT(7)) { if (xbits & BIT(7)) { color = dp[i * 8 + j] ^ 0xffffffff; /* complement */ } else { continue; /* transparent, no change */ } } else { color = (xbits & BIT(7) ? s->regs.cur_color1 : s->regs.cur_color0) | 0xff000000; } if (vga->hw_cursor_x + i * 8 + j >= h) { return; /* end of screen, don't span to next line */ } dp[i * 8 + j] = color; } } }
0
znc
d229761821da38d984a9e4098ad96842490dc001
NOT_APPLICABLE
NOT_APPLICABLE
void CClient::ReachedMaxBuffer() { DEBUG(GetSockName() << " == ReachedMaxBuffer()"); if (IsAttached()) { PutClient("ERROR :" + t_s("Closing link: Too long raw line")); } Close(); }
0
linux
78c9c4dfbf8c04883941445a195276bb4bb92c76
NOT_APPLICABLE
NOT_APPLICABLE
static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec64 *tp) { const pid_t pid = CPUCLOCK_PID(which_clock); int err = -EINVAL; if (pid == 0) { /* * Special case constant value for our own clocks. * We don't have to do any lookup to find ourselves. */ err = posix_cpu_clock_get_task(current, which_clock, tp); } else { /* * Find the given PID, and validate that the caller * should be able to see it. */ struct task_struct *p; rcu_read_lock(); p = find_task_by_vpid(pid); if (p) err = posix_cpu_clock_get_task(p, which_clock, tp); rcu_read_unlock(); } return err; }
0
libx11
388b303c62aa35a245f1704211a023440ad2c488
CVE-2020-14344
CWE-190
_XimAttributeToValue( Xic ic, XIMResourceList res, CARD16 *data, INT16 data_len, XPointer value, BITMASK32 mode) { switch (res->resource_size) { case XimType_SeparatorOfNestedList: case XimType_NEST: break; case XimType_CARD8: case XimType_CARD16: case XimType_CARD32: case XimType_Window: case XimType_XIMHotKeyState: _XCopyToArg((XPointer)data, (XPointer *)&value, data_len); break; case XimType_STRING8: { char *str; if (!(value)) return False; if (!(str = Xmalloc(data_len + 1))) return False; (void)memcpy(str, (char *)data, data_len); str[data_len] = '\0'; *((char **)value) = str; break; } case XimType_XIMStyles: { INT16 num = data[0]; register CARD32 *style_list = (CARD32 *)&data[2]; XIMStyle *style; XIMStyles *rep; register int i; char *p; int alloc_len; if (!(value)) return False; alloc_len = sizeof(XIMStyles) + sizeof(XIMStyle) * num; if (!(p = Xmalloc(alloc_len))) return False; rep = (XIMStyles *)p; style = (XIMStyle *)(p + sizeof(XIMStyles)); for (i = 0; i < num; i++) style[i] = (XIMStyle)style_list[i]; rep->count_styles = (unsigned short)num; rep->supported_styles = style; *((XIMStyles **)value) = rep; break; } case XimType_XRectangle: { XRectangle *rep; if (!(value)) return False; if (!(rep = Xmalloc(sizeof(XRectangle)))) return False; rep->x = data[0]; rep->y = data[1]; rep->width = data[2]; rep->height = data[3]; *((XRectangle **)value) = rep; break; } case XimType_XPoint: { XPoint *rep; if (!(value)) return False; if (!(rep = Xmalloc(sizeof(XPoint)))) return False; rep->x = data[0]; rep->y = data[1]; *((XPoint **)value) = rep; break; } case XimType_XFontSet: { INT16 len = data[0]; char *base_name; XFontSet rep = (XFontSet)NULL; char **missing_list = NULL; int missing_count; char *def_string; if (!(value)) return False; if (!ic) return False; if (!(base_name = Xmalloc(len + 1))) return False; (void)strncpy(base_name, (char *)&data[1], (int)len); base_name[len] = '\0'; if (mode & XIM_PREEDIT_ATTR) { if (!strcmp(base_name, ic->private.proto.preedit_font)) { rep = ic->core.preedit_attr.fontset; } else if (!ic->private.proto.preedit_font_length) { rep = XCreateFontSet(ic->core.im->core.display, base_name, &missing_list, &missing_count, &def_string); } } else if (mode & XIM_STATUS_ATTR) { if (!strcmp(base_name, ic->private.proto.status_font)) { rep = ic->core.status_attr.fontset; } else if (!ic->private.proto.status_font_length) { rep = XCreateFontSet(ic->core.im->core.display, base_name, &missing_list, &missing_count, &def_string); } } Xfree(base_name); Xfree(missing_list); *((XFontSet *)value) = rep; break; } case XimType_XIMHotKeyTriggers: { INT32 num = *((CARD32 *)data); register CARD32 *key_list = (CARD32 *)&data[2]; XIMHotKeyTrigger *key; XIMHotKeyTriggers *rep; register int i; char *p; int alloc_len; if (!(value)) return False; alloc_len = sizeof(XIMHotKeyTriggers) + sizeof(XIMHotKeyTrigger) * num; if (!(p = Xmalloc(alloc_len))) return False; rep = (XIMHotKeyTriggers *)p; key = (XIMHotKeyTrigger *)(p + sizeof(XIMHotKeyTriggers)); for (i = 0; i < num; i++, key_list += 3) { key[i].keysym = (KeySym)key_list[0]; /* keysym */ key[i].modifier = (int)key_list[1]; /* modifier */ key[i].modifier_mask = (int)key_list[2]; /* modifier_mask */ } rep->num_hot_key = (int)num; rep->key = key; *((XIMHotKeyTriggers **)value) = rep; break; } case XimType_XIMStringConversion: { break; } default: return False; } return True; }
1
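A hedged sketch of the CWE-190 pattern in the libX11 record above: counts such as num in the XimType_XIMStyles and XimType_XIMHotKeyTriggers branches come straight from protocol data, so size computations like sizeof(XIMStyles) + sizeof(XIMStyle) * num can wrap and Xmalloc returns a buffer shorter than the copy loop assumes. The checked-allocation helper below is one way to validate the arithmetic first; alloc_counted is an invented name for illustration, not the upstream patch.

#include <cstddef>
#include <cstdlib>
#include <limits>

// Illustration only: allocate header + elem * num bytes, but refuse counts
// whose size computation would overflow size_t -- the wrap that makes the
// record above exploitable. Callers would use it roughly as
//   alloc_counted(sizeof(XIMStyles), sizeof(XIMStyle), num)
// in place of computing alloc_len and calling Xmalloc directly.
static void* alloc_counted(std::size_t header, std::size_t elem,
                           std::size_t num) {
  constexpr std::size_t kMax = std::numeric_limits<std::size_t>::max();
  if (elem != 0 && num > (kMax - header) / elem)
    return nullptr;  // would wrap: caller must treat this as a parse error
  return std::malloc(header + elem * num);
}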
Chrome
6d9425ec7badda912555d46ea7abcfab81fdd9b9
NOT_APPLICABLE
NOT_APPLICABLE
void BrowserViewRenderer::RegisterWithWebContents( content::WebContents* web_contents) { web_contents->SetUserData(kBrowserViewRendererUserDataKey, new BrowserViewRendererUserData(this)); }
0
linux
f0d1bec9d58d4c038d0ac958c9af82be6eb18045
NOT_APPLICABLE
NOT_APPLICABLE
static int fifo_open(struct inode *inode, struct file *filp) { struct pipe_inode_info *pipe; bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC; int ret; filp->f_version = 0; spin_lock(&inode->i_lock); if (inode->i_pipe) { pipe = inode->i_pipe; pipe->files++; spin_unlock(&inode->i_lock); } else { spin_unlock(&inode->i_lock); pipe = alloc_pipe_info(); if (!pipe) return -ENOMEM; pipe->files = 1; spin_lock(&inode->i_lock); if (unlikely(inode->i_pipe)) { inode->i_pipe->files++; spin_unlock(&inode->i_lock); free_pipe_info(pipe); pipe = inode->i_pipe; } else { inode->i_pipe = pipe; spin_unlock(&inode->i_lock); } } filp->private_data = pipe; /* OK, we have a pipe and it's pinned down */ __pipe_lock(pipe); /* We can only do regular read/write on fifos */ filp->f_mode &= (FMODE_READ | FMODE_WRITE); switch (filp->f_mode) { case FMODE_READ: /* * O_RDONLY * POSIX.1 says that O_NONBLOCK means return with the FIFO * opened, even when there is no process writing the FIFO. */ pipe->r_counter++; if (pipe->readers++ == 0) wake_up_partner(pipe); if (!is_pipe && !pipe->writers) { if ((filp->f_flags & O_NONBLOCK)) { /* suppress POLLHUP until we have * seen a writer */ filp->f_version = pipe->w_counter; } else { if (wait_for_partner(pipe, &pipe->w_counter)) goto err_rd; } } break; case FMODE_WRITE: /* * O_WRONLY * POSIX.1 says that O_NONBLOCK means return -1 with * errno=ENXIO when there is no process reading the FIFO. */ ret = -ENXIO; if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers) goto err; pipe->w_counter++; if (!pipe->writers++) wake_up_partner(pipe); if (!is_pipe && !pipe->readers) { if (wait_for_partner(pipe, &pipe->r_counter)) goto err_wr; } break; case FMODE_READ | FMODE_WRITE: /* * O_RDWR * POSIX.1 leaves this case "undefined" when O_NONBLOCK is set. * This implementation will NEVER block on a O_RDWR open, since * the process can at least talk to itself. */ pipe->readers++; pipe->writers++; pipe->r_counter++; pipe->w_counter++; if (pipe->readers == 1 || pipe->writers == 1) wake_up_partner(pipe); break; default: ret = -EINVAL; goto err; } /* Ok! */ __pipe_unlock(pipe); return 0; err_rd: if (!--pipe->readers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err_wr: if (!--pipe->writers) wake_up_interruptible(&pipe->wait); ret = -ERESTARTSYS; goto err; err: __pipe_unlock(pipe); put_pipe_info(inode, pipe); return ret; }
0
radare2
62e39f34b2705131a2d08aff0c2e542c6a52cf0e
NOT_APPLICABLE
NOT_APPLICABLE
int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; }
0
systemd-stable
764b74113e36ac5219a4b82a05f311b5a92136ce
NOT_APPLICABLE
NOT_APPLICABLE
int slice_build_subslice(const char *slice, const char *name, char **ret) { char *subslice; assert(slice); assert(name); assert(ret); if (!slice_name_is_valid(slice)) return -EINVAL; if (!unit_prefix_is_valid(name)) return -EINVAL; if (streq(slice, SPECIAL_ROOT_SLICE)) subslice = strjoin(name, ".slice"); else { char *e; assert_se(e = endswith(slice, ".slice")); subslice = new(char, (e - slice) + 1 + strlen(name) + 6 + 1); if (!subslice) return -ENOMEM; stpcpy(stpcpy(stpcpy(mempcpy(subslice, slice, e - slice), "-"), name), ".slice"); } *ret = subslice; return 0; }
0
linux
fcdf445ff42f036d22178b49cf64e92d527c1330
NOT_APPLICABLE
NOT_APPLICABLE
static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node, const struct divs_data *data) { struct clk_onecell_data *clk_data; const char *parent; const char *clk_name; struct clk **clks, *pclk; struct clk_hw *gate_hw, *rate_hw; const struct clk_ops *rate_ops; struct clk_gate *gate = NULL; struct clk_fixed_factor *fix_factor; struct clk_divider *divider; struct factors_data factors = *data->factors; char *derived_name = NULL; void __iomem *reg; int ndivs = SUNXI_DIVS_MAX_QTY, i = 0; int flags, clkflags; /* if number of children known, use it */ if (data->ndivs) ndivs = data->ndivs; /* Try to find a name for base factor clock */ for (i = 0; i < ndivs; i++) { if (data->div[i].self) { of_property_read_string_index(node, "clock-output-names", i, &factors.name); break; } } /* If we don't have a .self clk use the first output-name up to '_' */ if (factors.name == NULL) { char *endp; of_property_read_string_index(node, "clock-output-names", 0, &clk_name); endp = strchr(clk_name, '_'); if (endp) { derived_name = kstrndup(clk_name, endp - clk_name, GFP_KERNEL); if (!derived_name) return NULL; factors.name = derived_name; } else { factors.name = clk_name; } } /* Set up factor clock that we will be dividing */ pclk = sunxi_factors_clk_setup(node, &factors); if (!pclk) return NULL; parent = __clk_get_name(pclk); kfree(derived_name); reg = of_iomap(node, 0); if (!reg) { pr_err("Could not map registers for divs-clk: %pOF\n", node); return NULL; } clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL); if (!clk_data) goto out_unmap; clks = kcalloc(ndivs, sizeof(*clks), GFP_KERNEL); if (!clks) goto free_clkdata; clk_data->clks = clks; /* It's not a good idea to have automatic reparenting changing * our RAM clock! */ clkflags = !strcmp("pll5", parent) ? 0 : CLK_SET_RATE_PARENT; for (i = 0; i < ndivs; i++) { if (of_property_read_string_index(node, "clock-output-names", i, &clk_name) != 0) break; /* If this is the base factor clock, only update clks */ if (data->div[i].self) { clk_data->clks[i] = pclk; continue; } gate_hw = NULL; rate_hw = NULL; rate_ops = NULL; /* If this leaf clock can be gated, create a gate */ if (data->div[i].gate) { gate = kzalloc(sizeof(*gate), GFP_KERNEL); if (!gate) goto free_clks; gate->reg = reg; gate->bit_idx = data->div[i].gate; gate->lock = &clk_lock; gate_hw = &gate->hw; } /* Leaves can be fixed or configurable divisors */ if (data->div[i].fixed) { fix_factor = kzalloc(sizeof(*fix_factor), GFP_KERNEL); if (!fix_factor) goto free_gate; fix_factor->mult = 1; fix_factor->div = data->div[i].fixed; rate_hw = &fix_factor->hw; rate_ops = &clk_fixed_factor_ops; } else { divider = kzalloc(sizeof(*divider), GFP_KERNEL); if (!divider) goto free_gate; flags = data->div[i].pow ? CLK_DIVIDER_POWER_OF_TWO : 0; divider->reg = reg; divider->shift = data->div[i].shift; divider->width = SUNXI_DIVISOR_WIDTH; divider->flags = flags; divider->lock = &clk_lock; divider->table = data->div[i].table; rate_hw = &divider->hw; rate_ops = &clk_divider_ops; } /* Wrap the (potential) gate and the divisor on a composite * clock to unify them */ clks[i] = clk_register_composite(NULL, clk_name, &parent, 1, NULL, NULL, rate_hw, rate_ops, gate_hw, &clk_gate_ops, clkflags | data->div[i].critical ? 
CLK_IS_CRITICAL : 0); WARN_ON(IS_ERR(clk_data->clks[i])); } /* Adjust to the real max */ clk_data->clk_num = i; if (of_clk_add_provider(node, of_clk_src_onecell_get, clk_data)) { pr_err("%s: failed to add clock provider for %s\n", __func__, clk_name); goto free_gate; } return clks; free_gate: kfree(gate); free_clks: kfree(clks); free_clkdata: kfree(clk_data); out_unmap: iounmap(reg); return NULL; }
0
php-src
a15bffd105ac28fd0dd9b596632dbf035238fda3
NOT_APPLICABLE
NOT_APPLICABLE
static zend_bool php_auto_globals_create_server(zend_string *name) { if (PG(variables_order) && (strchr(PG(variables_order),'S') || strchr(PG(variables_order),'s'))) { php_register_server_variables(); if (PG(register_argc_argv)) { if (SG(request_info).argc) { zval *argc, *argv; if ((argc = zend_hash_str_find_ind(&EG(symbol_table), "argc", sizeof("argc")-1)) != NULL && (argv = zend_hash_str_find_ind(&EG(symbol_table), "argv", sizeof("argv")-1)) != NULL) { Z_ADDREF_P(argv); zend_hash_str_update(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]), "argv", sizeof("argv")-1, argv); zend_hash_str_update(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER]), "argc", sizeof("argc")-1, argc); } } else { php_build_argv(SG(request_info).query_string, &PG(http_globals)[TRACK_VARS_SERVER]); } } } else { zval_ptr_dtor(&PG(http_globals)[TRACK_VARS_SERVER]); array_init(&PG(http_globals)[TRACK_VARS_SERVER]); } check_http_proxy(Z_ARRVAL(PG(http_globals)[TRACK_VARS_SERVER])); zend_hash_update(&EG(symbol_table), name, &PG(http_globals)[TRACK_VARS_SERVER]); Z_ADDREF(PG(http_globals)[TRACK_VARS_SERVER]); return 0; /* don't rearm */ }
0
linux
12ca6ad2e3a896256f086497a7c7406a547ee373
NOT_APPLICABLE
NOT_APPLICABLE
void perf_prepare_sample(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event, struct pt_regs *regs) { u64 sample_type = event->attr.sample_type; header->type = PERF_RECORD_SAMPLE; header->size = sizeof(*header) + event->header_size; header->misc = 0; header->misc |= perf_misc_flags(regs); __perf_event_header__init_id(header, data, event); if (sample_type & PERF_SAMPLE_IP) data->ip = perf_instruction_pointer(regs); if (sample_type & PERF_SAMPLE_CALLCHAIN) { int size = 1; data->callchain = perf_callchain(event, regs); if (data->callchain) size += data->callchain->nr; header->size += size * sizeof(u64); } if (sample_type & PERF_SAMPLE_RAW) { int size = sizeof(u32); if (data->raw) size += data->raw->size; else size += sizeof(u32); header->size += round_up(size, sizeof(u64)); } if (sample_type & PERF_SAMPLE_BRANCH_STACK) { int size = sizeof(u64); /* nr */ if (data->br_stack) { size += data->br_stack->nr * sizeof(struct perf_branch_entry); } header->size += size; } if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) perf_sample_regs_user(&data->regs_user, regs, &data->regs_user_copy); if (sample_type & PERF_SAMPLE_REGS_USER) { /* regs dump ABI info */ int size = sizeof(u64); if (data->regs_user.regs) { u64 mask = event->attr.sample_regs_user; size += hweight64(mask) * sizeof(u64); } header->size += size; } if (sample_type & PERF_SAMPLE_STACK_USER) { /* * Either we need PERF_SAMPLE_STACK_USER bit to be allways * processed as the last one or have additional check added * in case new sample type is added, because we could eat * up the rest of the sample size. */ u16 stack_size = event->attr.sample_stack_user; u16 size = sizeof(u64); stack_size = perf_sample_ustack_size(stack_size, header->size, data->regs_user.regs); /* * If there is something to dump, add space for the dump * itself and for the field that tells the dynamic size, * which is how many have been actually dumped. */ if (stack_size) size += sizeof(u64) + stack_size; data->stack_user_size = stack_size; header->size += size; } if (sample_type & PERF_SAMPLE_REGS_INTR) { /* regs dump ABI info */ int size = sizeof(u64); perf_sample_regs_intr(&data->regs_intr, regs); if (data->regs_intr.regs) { u64 mask = event->attr.sample_regs_intr; size += hweight64(mask) * sizeof(u64); } header->size += size; } }
0
linux
a399b29dfbaaaf91162b2dc5a5875dd51bbfa2a1
NOT_APPLICABLE
NOT_APPLICABLE
void __init shm_init (void) { ipc_init_proc_interface("sysvipc/shm", #if BITS_PER_LONG <= 32 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #else " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #endif IPC_SHM_IDS, sysvipc_shm_proc_show); }
0
libgit2
58a6fe94cb851f71214dbefac3f9bffee437d6fe
NOT_APPLICABLE
NOT_APPLICABLE
static bool is_racy_entry(git_index *index, const git_index_entry *entry) { /* Git special-cases submodules in the check */ if (S_ISGITLINK(entry->mode)) return false; return git_index_entry_newer_than_index(entry, index); }
0
FFmpeg
689e59b7ffed34eba6159dcc78e87133862e3746
NOT_APPLICABLE
NOT_APPLICABLE
static int mov_read_glbl(MOVContext *c, AVIOContext *pb, MOVAtom atom) { AVStream *st; if (c->fc->nb_streams < 1) return 0; st = c->fc->streams[c->fc->nb_streams-1]; if ((uint64_t)atom.size > (1<<30)) return AVERROR_INVALIDDATA; if (atom.size >= 10) { unsigned size = avio_rb32(pb); unsigned type = avio_rl32(pb); avio_seek(pb, -8, SEEK_CUR); if (type == MKTAG('f','i','e','l') && size == atom.size) return mov_read_default(c, pb, atom); } av_free(st->codec->extradata); st->codec->extradata_size = 0; st->codec->extradata = av_mallocz(atom.size + FF_INPUT_BUFFER_PADDING_SIZE); if (!st->codec->extradata) return AVERROR(ENOMEM); st->codec->extradata_size = atom.size; avio_read(pb, st->codec->extradata, atom.size); return 0; }
0
Chrome
108147dfd1ea159fd3632ef92ccc4ab8952980c7
NOT_APPLICABLE
NOT_APPLICABLE
bool ContentSecurityPolicy::IsValidCSPAttr(const String& attr, const String& context_required_csp) { if (attr.Contains('\n') || attr.Contains('\r')) return false; ContentSecurityPolicy* attr_policy = ContentSecurityPolicy::Create(); attr_policy->AddPolicyFromHeaderValue(attr, kContentSecurityPolicyHeaderTypeEnforce, kContentSecurityPolicyHeaderSourceHTTP); if (!attr_policy->console_messages_.IsEmpty() || attr_policy->policies_.size() != 1) { return false; } for (auto& directiveList : attr_policy->policies_) { if (directiveList->ReportEndpoints().size() != 0) return false; } if (context_required_csp.IsEmpty() || context_required_csp.IsNull()) { return true; } ContentSecurityPolicy* context_policy = ContentSecurityPolicy::Create(); context_policy->AddPolicyFromHeaderValue( context_required_csp, kContentSecurityPolicyHeaderTypeEnforce, kContentSecurityPolicyHeaderSourceHTTP); DCHECK(context_policy->console_messages_.IsEmpty() && context_policy->policies_.size() == 1); return context_policy->Subsumes(*attr_policy); }
0
Chrome
94bb8861ec61b4ebcce8a4489be2cf7e2a055d90
NOT_APPLICABLE
NOT_APPLICABLE
AudioBuffer* ConvolverNode::buffer() { ASSERT(isMainThread()); return m_buffer.get(); }
0
Chrome
e3aa8a56706c4abe208934d5c294f7b594b8b693
NOT_APPLICABLE
NOT_APPLICABLE
void SetPolicy(PolicyMap* policies, const char* key, std::unique_ptr<base::Value> value) { if (value) { policies->Set(key, POLICY_LEVEL_MANDATORY, POLICY_SCOPE_USER, POLICY_SOURCE_CLOUD, std::move(value), nullptr); } else { policies->Erase(key); } }
0
Chrome
061ddbae1ee31476b57ea44a953970ab2fe8aca1
NOT_APPLICABLE
NOT_APPLICABLE
void DocumentWriter::setDocumentWasLoadedAsPartOfNavigation() { ASSERT(!m_parser->isStopped()); m_parser->setDocumentWasLoadedAsPartOfNavigation(); }
0
passenger
34b1087870c2bf85ebfd72c30b78577e10ab9744
NOT_APPLICABLE
NOT_APPLICABLE
extractDirName(const StaticString &path) { char *path_copy = strdup(path.c_str()); char *result = dirname(path_copy); string result_string(result); free(path_copy); return result_string; }
0
Android
e8bbf5b0889790cf8616f4004867f0ff656f0551
NOT_APPLICABLE
NOT_APPLICABLE
void smp_br_check_authorization_request(tSMP_CB* p_cb, tSMP_INT_DATA* p_data) { uint8_t reason = SMP_SUCCESS; SMP_TRACE_DEBUG("%s rcvs i_keys=0x%x r_keys=0x%x (i-initiator r-responder)", __func__, p_cb->local_i_key, p_cb->local_r_key); /* In LE SC mode LK field is ignored when BR/EDR transport is used */ p_cb->local_i_key &= ~SMP_SEC_KEY_TYPE_LK; p_cb->local_r_key &= ~SMP_SEC_KEY_TYPE_LK; /* In LE SC mode only IRK, IAI, CSRK are exchanged with the peer. ** Set local_r_key on master to expect only these keys. */ if (p_cb->role == HCI_ROLE_MASTER) { p_cb->local_r_key &= (SMP_SEC_KEY_TYPE_ID | SMP_SEC_KEY_TYPE_CSRK); } /* Check if H7 function needs to be used for key derivation*/ if ((p_cb->loc_auth_req & SMP_H7_SUPPORT_BIT) && (p_cb->peer_auth_req & SMP_H7_SUPPORT_BIT)) { p_cb->key_derivation_h7_used = TRUE; } SMP_TRACE_DEBUG("%s: use h7 = %d", __func__, p_cb->key_derivation_h7_used); SMP_TRACE_DEBUG( "%s rcvs upgrades: i_keys=0x%x r_keys=0x%x (i-initiator r-responder)", __func__, p_cb->local_i_key, p_cb->local_r_key); if (/*((p_cb->peer_auth_req & SMP_AUTH_BOND) || (p_cb->loc_auth_req & SMP_AUTH_BOND)) &&*/ (p_cb->local_i_key || p_cb->local_r_key)) { smp_br_state_machine_event(p_cb, SMP_BR_BOND_REQ_EVT, NULL); /* if no peer key is expected, start master key distribution */ if (p_cb->role == HCI_ROLE_MASTER && p_cb->local_r_key == 0) smp_key_distribution_by_transport(p_cb, NULL); } else { smp_br_state_machine_event(p_cb, SMP_BR_AUTH_CMPL_EVT, &reason); } }
0
Chrome
f85a87ec670ad0fce9d98d90c9a705b72a288154
NOT_APPLICABLE
NOT_APPLICABLE
static void attrWithJSGetterAndSetterAttributeSetterCallback(const v8::FunctionCallbackInfo<v8::Value>& info) { v8::Local<v8::Value> jsValue = info[0]; TRACE_EVENT_SET_SAMPLING_STATE("Blink", "DOMSetter"); TestObjectV8Internal::attrWithJSGetterAndSetterAttributeSetter(jsValue, info); TRACE_EVENT_SET_SAMPLING_STATE("V8", "V8Execution"); }
0
zlib
eff308af425b67093bab25f80f1ae950166bece1
NOT_APPLICABLE
NOT_APPLICABLE
unsigned long ZEXPORT inflateCodesUsed(strm) z_streamp strm; { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return (unsigned long)-1; state = (struct inflate_state FAR *)strm->state; return (unsigned long)(state->next - state->codes); }
0
linux
b4a1b4f5047e4f54e194681125c74c0aa64d637d
NOT_APPLICABLE
NOT_APPLICABLE
long keyctl_join_session_keyring(const char __user *_name) { char *name; long ret; /* fetch the name from userspace */ name = NULL; if (_name) { name = strndup_user(_name, KEY_MAX_DESC_SIZE); if (IS_ERR(name)) { ret = PTR_ERR(name); goto error; } } /* join the session */ ret = join_session_keyring(name); kfree(name); error: return ret; }
0
linux
3a4d44b6162555070194e486ff6b3799a8d323a2
NOT_APPLICABLE
NOT_APPLICABLE
void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec) { while (nsec >= NSEC_PER_SEC) { /* * The following asm() prevents the compiler from * optimising this loop into a modulo operation. See * also __iter_div_u64_rem() in include/linux/time.h */ asm("" : "+rm"(nsec)); nsec -= NSEC_PER_SEC; ++sec; } while (nsec < 0) { asm("" : "+rm"(nsec)); nsec += NSEC_PER_SEC; --sec; } ts->tv_sec = sec; ts->tv_nsec = nsec; }
0
mruby
faa4eaf6803bd11669bc324b4c34e7162286bfa3
NOT_APPLICABLE
NOT_APPLICABLE
check_type(mrb_state *mrb, mrb_value val, enum mrb_vtype t, const char *c, const char *m) { mrb_value tmp; tmp = mrb_check_convert_type(mrb, val, t, c, m); if (mrb_nil_p(tmp)) { mrb_raisef(mrb, E_TYPE_ERROR, "expected %S", mrb_str_new_cstr(mrb, c)); } return tmp; }
0