project (string, 788 classes) | commit_id (string, 6-81 chars) | CVE ID (string, 13-16 chars) | CWE ID (string, 126 classes) | func (string, 14-482k chars) | vul (int8, 0 or 1) |
---|---|---|---|---|---|
mono | 8e890a3bf80a4620e417814dc14886b1bbd17625 | NOT_APPLICABLE | NOT_APPLICABLE | mono_loader_set_error_bad_image (char *msg)
{
MonoLoaderError *error;
if (mono_loader_get_last_error ())
return;
error = g_new0 (MonoLoaderError, 1);
error->exception_type = MONO_EXCEPTION_BAD_IMAGE;
error->msg = msg;
set_loader_error (error);
} | 0 |
libjpeg-turbo | 3d9c64e9f8aa1ee954d1d0bb3390fc894bb84da3 | NOT_APPLICABLE | NOT_APPLICABLE | DLLEXPORT int tjCompressFromYUV(tjhandle handle, const unsigned char *srcBuf,
int width, int pad, int height, int subsamp,
unsigned char **jpegBuf,
unsigned long *jpegSize, int jpegQual,
int flags)
{
const unsigned char *srcPlanes[3];
int pw0, ph0, strides[3], retval = -1;
tjinstance *this = (tjinstance *)handle;
if (!this) _throwg("tjCompressFromYUV(): Invalid handle");
this->isInstanceError = FALSE;
if (srcBuf == NULL || width <= 0 || pad < 1 || height <= 0 || subsamp < 0 ||
subsamp >= NUMSUBOPT)
_throw("tjCompressFromYUV(): Invalid argument");
pw0 = tjPlaneWidth(0, width, subsamp);
ph0 = tjPlaneHeight(0, height, subsamp);
srcPlanes[0] = srcBuf;
strides[0] = PAD(pw0, pad);
if (subsamp == TJSAMP_GRAY) {
strides[1] = strides[2] = 0;
srcPlanes[1] = srcPlanes[2] = NULL;
} else {
int pw1 = tjPlaneWidth(1, width, subsamp);
int ph1 = tjPlaneHeight(1, height, subsamp);
strides[1] = strides[2] = PAD(pw1, pad);
srcPlanes[1] = srcPlanes[0] + strides[0] * ph0;
srcPlanes[2] = srcPlanes[1] + strides[1] * ph1;
}
return tjCompressFromYUVPlanes(handle, srcPlanes, width, strides, height,
subsamp, jpegBuf, jpegSize, jpegQual, flags);
bailout:
return retval;
} | 0 |
Chrome | c13e1da62b5f5f0e6fe8c1f769a5a28415415244 | NOT_APPLICABLE | NOT_APPLICABLE | void GLES2DecoderImpl::DeleteFramebuffersHelper(
GLsizei n, const GLuint* client_ids) {
for (GLsizei ii = 0; ii < n; ++ii) {
FramebufferManager::FramebufferInfo* info =
GetFramebufferInfo(client_ids[ii]);
if (info) {
if (info == bound_draw_framebuffer_) {
bound_draw_framebuffer_ = NULL;
state_dirty_ = true;
}
GLuint service_id = info->service_id();
glDeleteFramebuffersEXT(1, &service_id);
RemoveFramebufferInfo(client_ids[ii]);
}
}
}
| 0 |
Chrome | 5f372f899b8709dac700710b5f0f90959dcf9ecb | NOT_APPLICABLE | NOT_APPLICABLE | void AutoFillManager::FillPhoneNumberField(const AutoFillProfile* profile,
webkit_glue::FormField* field) {
string16 number = profile->GetFieldText(AutoFillType(PHONE_HOME_NUMBER));
bool has_valid_suffix_and_prefix = (number.length() ==
static_cast<size_t>(PhoneNumber::kPrefixLength +
PhoneNumber::kSuffixLength));
if (has_valid_suffix_and_prefix &&
field->max_length() == PhoneNumber::kPrefixLength) {
number = number.substr(PhoneNumber::kPrefixOffset,
PhoneNumber::kPrefixLength);
field->set_value(number);
} else if (has_valid_suffix_and_prefix &&
field->max_length() == PhoneNumber::kSuffixLength) {
number = number.substr(PhoneNumber::kSuffixOffset,
PhoneNumber::kSuffixLength);
field->set_value(number);
} else {
field->set_value(number);
}
}
| 0 |
php-src | 553702980ae89c83f2d6e254d62cf82e204956d0 | NOT_APPLICABLE | NOT_APPLICABLE | static int comparewithmap(gdImagePtr im1, gdImagePtr im2, int c1, int c2, int *colorMap)
{
if(!colorMap) {
return c1 == c2;
}
if(-2 != colorMap[c1]) {
return colorMap[c1] == c2;
}
return (colorMap[c1] = gdImageColorExactAlpha(im2, im1->red[c1], im1->green[c1], im1->blue[c1], im1->alpha[c1])) == c2;
}
| 0 |
libconfuse | d73777c2c3566fb2647727bb56d9a2295b81669b | NOT_APPLICABLE | NOT_APPLICABLE | DLLIMPORT int cfg_opt_rmtsec(cfg_opt_t *opt, const char *title)
{
unsigned int i, n;
if (!opt || !title) {
errno = EINVAL;
return CFG_FAIL;
}
if (!is_set(CFGF_TITLE, opt->flags))
return CFG_FAIL;
n = cfg_opt_size(opt);
for (i = 0; i < n; i++) {
cfg_t *sec = cfg_opt_getnsec(opt, i);
if (!sec || !sec->title)
return CFG_FAIL;
if (is_set(CFGF_NOCASE, opt->flags)) {
if (strcasecmp(title, sec->title) == 0)
break;
} else {
if (strcmp(title, sec->title) == 0)
break;
}
}
if (i == n)
return CFG_FAIL;
return cfg_opt_rmnsec(opt, i);
} | 0 |
linux-2.6 | 59839dfff5eabca01cc4e20b45797a60a80af8cb | NOT_APPLICABLE | NOT_APPLICABLE | static void post_kvm_run_save(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
kvm_run->ready_for_interrupt_injection = 1;
else
kvm_run->ready_for_interrupt_injection =
(vcpu->arch.interrupt_window_open &&
vcpu->arch.irq_summary == 0);
} | 0 |
pacemaker | 84ac07c | NOT_APPLICABLE | NOT_APPLICABLE | hash2smartfield(gpointer key, gpointer value, gpointer user_data)
{
const char *name = key;
const char *s_value = value;
xmlNode *xml_node = user_data;
if (isdigit(name[0])) {
xmlNode *tmp = create_xml_node(xml_node, XML_TAG_PARAM);
crm_xml_add(tmp, XML_NVPAIR_ATTR_NAME, name);
crm_xml_add(tmp, XML_NVPAIR_ATTR_VALUE, s_value);
} else if (crm_element_value(xml_node, name) == NULL) {
crm_xml_add(xml_node, name, s_value);
crm_trace("dumped: %s=%s", name, s_value);
} else {
crm_trace("duplicate: %s=%s", name, s_value);
}
}
| 0 |
Chrome | eb4d5d9ab41449b79fcf6f84d8983be2b12bd490 | NOT_APPLICABLE | NOT_APPLICABLE | NodeListsNodeData& ContainerNode::ensureNodeLists()
{
return ensureRareData().ensureNodeLists();
}
| 0 |
openjpeg | 0fa5a17c98c4b8f9ee2286f4f0a50cf52a5fccb0 | NOT_APPLICABLE | NOT_APPLICABLE | OPJ_BOOL opj_j2k_write_tlm( opj_j2k_t *p_j2k,
opj_stream_private_t *p_stream,
opj_event_mgr_t * p_manager
)
{
OPJ_BYTE * l_current_data = 00;
OPJ_UINT32 l_tlm_size;
/* preconditions */
assert(p_j2k != 00);
assert(p_manager != 00);
assert(p_stream != 00);
l_tlm_size = 6 + (5*p_j2k->m_specific_param.m_encoder.m_total_tile_parts);
if (l_tlm_size > p_j2k->m_specific_param.m_encoder.m_header_tile_data_size) {
OPJ_BYTE *new_header_tile_data = (OPJ_BYTE *) opj_realloc(p_j2k->m_specific_param.m_encoder.m_header_tile_data, l_tlm_size);
if (! new_header_tile_data) {
opj_free(p_j2k->m_specific_param.m_encoder.m_header_tile_data);
p_j2k->m_specific_param.m_encoder.m_header_tile_data = NULL;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = 0;
opj_event_msg(p_manager, EVT_ERROR, "Not enough memory to write TLM marker\n");
return OPJ_FALSE;
}
p_j2k->m_specific_param.m_encoder.m_header_tile_data = new_header_tile_data;
p_j2k->m_specific_param.m_encoder.m_header_tile_data_size = l_tlm_size;
}
l_current_data = p_j2k->m_specific_param.m_encoder.m_header_tile_data;
/* change the way data is written to avoid seeking if possible */
/* TODO */
p_j2k->m_specific_param.m_encoder.m_tlm_start = opj_stream_tell(p_stream);
opj_write_bytes(l_current_data,J2K_MS_TLM,2); /* TLM */
l_current_data += 2;
opj_write_bytes(l_current_data,l_tlm_size-2,2); /* Lpoc */
l_current_data += 2;
opj_write_bytes(l_current_data,0,1); /* Ztlm=0*/
++l_current_data;
opj_write_bytes(l_current_data,0x50,1); /* Stlm ST=1(8bits-255 tiles max),SP=1(Ptlm=32bits) */
++l_current_data;
/* do nothing on the 5 * l_j2k->m_specific_param.m_encoder.m_total_tile_parts remaining data */
if (opj_stream_write_data(p_stream,p_j2k->m_specific_param.m_encoder.m_header_tile_data,l_tlm_size,p_manager) != l_tlm_size) {
return OPJ_FALSE;
}
return OPJ_TRUE;
} | 0 |
libarchive | 5562545b5562f6d12a4ef991fae158bf4ccf92b6 | NOT_APPLICABLE | NOT_APPLICABLE | parse_codes(struct archive_read *a)
{
int i, j, val, n, r;
unsigned char bitlengths[MAX_SYMBOLS], zerocount, ppmd_flags;
unsigned int maxorder;
struct huffman_code precode;
struct rar *rar = (struct rar *)(a->format->data);
struct rar_br *br = &(rar->br);
free_codes(a);
/* Skip to the next byte */
rar_br_consume_unalined_bits(br);
/* PPMd block flag */
if (!rar_br_read_ahead(a, br, 1))
goto truncated_data;
if ((rar->is_ppmd_block = rar_br_bits(br, 1)) != 0)
{
rar_br_consume(br, 1);
if (!rar_br_read_ahead(a, br, 7))
goto truncated_data;
ppmd_flags = rar_br_bits(br, 7);
rar_br_consume(br, 7);
/* Memory is allocated in MB */
if (ppmd_flags & 0x20)
{
if (!rar_br_read_ahead(a, br, 8))
goto truncated_data;
rar->dictionary_size = (rar_br_bits(br, 8) + 1) << 20;
rar_br_consume(br, 8);
}
if (ppmd_flags & 0x40)
{
if (!rar_br_read_ahead(a, br, 8))
goto truncated_data;
rar->ppmd_escape = rar->ppmd7_context.InitEsc = rar_br_bits(br, 8);
rar_br_consume(br, 8);
}
else
rar->ppmd_escape = 2;
if (ppmd_flags & 0x20)
{
maxorder = (ppmd_flags & 0x1F) + 1;
if(maxorder > 16)
maxorder = 16 + (maxorder - 16) * 3;
if (maxorder == 1)
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
return (ARCHIVE_FATAL);
}
/* Make sure ppmd7_contest is freed before Ppmd7_Construct
* because reading a broken file cause this abnormal sequence. */
__archive_ppmd7_functions.Ppmd7_Free(&rar->ppmd7_context, &g_szalloc);
rar->bytein.a = a;
rar->bytein.Read = &ppmd_read;
__archive_ppmd7_functions.PpmdRAR_RangeDec_CreateVTable(&rar->range_dec);
rar->range_dec.Stream = &rar->bytein;
__archive_ppmd7_functions.Ppmd7_Construct(&rar->ppmd7_context);
if (rar->dictionary_size == 0) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid zero dictionary size");
return (ARCHIVE_FATAL);
}
if (!__archive_ppmd7_functions.Ppmd7_Alloc(&rar->ppmd7_context,
rar->dictionary_size, &g_szalloc))
{
archive_set_error(&a->archive, ENOMEM,
"Out of memory");
return (ARCHIVE_FATAL);
}
if (!__archive_ppmd7_functions.PpmdRAR_RangeDec_Init(&rar->range_dec))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unable to initialize PPMd range decoder");
return (ARCHIVE_FATAL);
}
__archive_ppmd7_functions.Ppmd7_Init(&rar->ppmd7_context, maxorder);
rar->ppmd_valid = 1;
}
else
{
if (!rar->ppmd_valid) {
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Invalid PPMd sequence");
return (ARCHIVE_FATAL);
}
if (!__archive_ppmd7_functions.PpmdRAR_RangeDec_Init(&rar->range_dec))
{
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Unable to initialize PPMd range decoder");
return (ARCHIVE_FATAL);
}
}
}
else
{
rar_br_consume(br, 1);
/* Keep existing table flag */
if (!rar_br_read_ahead(a, br, 1))
goto truncated_data;
if (!rar_br_bits(br, 1))
memset(rar->lengthtable, 0, sizeof(rar->lengthtable));
rar_br_consume(br, 1);
memset(&bitlengths, 0, sizeof(bitlengths));
for (i = 0; i < MAX_SYMBOLS;)
{
if (!rar_br_read_ahead(a, br, 4))
goto truncated_data;
bitlengths[i++] = rar_br_bits(br, 4);
rar_br_consume(br, 4);
if (bitlengths[i-1] == 0xF)
{
if (!rar_br_read_ahead(a, br, 4))
goto truncated_data;
zerocount = rar_br_bits(br, 4);
rar_br_consume(br, 4);
if (zerocount)
{
i--;
for (j = 0; j < zerocount + 2 && i < MAX_SYMBOLS; j++)
bitlengths[i++] = 0;
}
}
}
memset(&precode, 0, sizeof(precode));
r = create_code(a, &precode, bitlengths, MAX_SYMBOLS, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK) {
free(precode.tree);
free(precode.table);
return (r);
}
for (i = 0; i < HUFFMAN_TABLE_SIZE;)
{
if ((val = read_next_symbol(a, &precode)) < 0) {
free(precode.tree);
free(precode.table);
return (ARCHIVE_FATAL);
}
if (val < 16)
{
rar->lengthtable[i] = (rar->lengthtable[i] + val) & 0xF;
i++;
}
else if (val < 18)
{
if (i == 0)
{
free(precode.tree);
free(precode.table);
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Internal error extracting RAR file.");
return (ARCHIVE_FATAL);
}
if(val == 16) {
if (!rar_br_read_ahead(a, br, 3)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 3) + 3;
rar_br_consume(br, 3);
} else {
if (!rar_br_read_ahead(a, br, 7)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 7) + 11;
rar_br_consume(br, 7);
}
for (j = 0; j < n && i < HUFFMAN_TABLE_SIZE; j++)
{
rar->lengthtable[i] = rar->lengthtable[i-1];
i++;
}
}
else
{
if(val == 18) {
if (!rar_br_read_ahead(a, br, 3)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 3) + 3;
rar_br_consume(br, 3);
} else {
if (!rar_br_read_ahead(a, br, 7)) {
free(precode.tree);
free(precode.table);
goto truncated_data;
}
n = rar_br_bits(br, 7) + 11;
rar_br_consume(br, 7);
}
for(j = 0; j < n && i < HUFFMAN_TABLE_SIZE; j++)
rar->lengthtable[i++] = 0;
}
}
free(precode.tree);
free(precode.table);
r = create_code(a, &rar->maincode, &rar->lengthtable[0], MAINCODE_SIZE,
MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
r = create_code(a, &rar->offsetcode, &rar->lengthtable[MAINCODE_SIZE],
OFFSETCODE_SIZE, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
r = create_code(a, &rar->lowoffsetcode,
&rar->lengthtable[MAINCODE_SIZE + OFFSETCODE_SIZE],
LOWOFFSETCODE_SIZE, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
r = create_code(a, &rar->lengthcode,
&rar->lengthtable[MAINCODE_SIZE + OFFSETCODE_SIZE +
LOWOFFSETCODE_SIZE], LENGTHCODE_SIZE, MAX_SYMBOL_LENGTH);
if (r != ARCHIVE_OK)
return (r);
}
if (!rar->dictionary_size || !rar->lzss.window)
{
/* Seems as though dictionary sizes are not used. Even so, minimize
* memory usage as much as possible.
*/
void *new_window;
unsigned int new_size;
if (rar->unp_size >= DICTIONARY_MAX_SIZE)
new_size = DICTIONARY_MAX_SIZE;
else
new_size = rar_fls((unsigned int)rar->unp_size) << 1;
new_window = realloc(rar->lzss.window, new_size);
if (new_window == NULL) {
archive_set_error(&a->archive, ENOMEM,
"Unable to allocate memory for uncompressed data.");
return (ARCHIVE_FATAL);
}
rar->lzss.window = (unsigned char *)new_window;
rar->dictionary_size = new_size;
memset(rar->lzss.window, 0, rar->dictionary_size);
rar->lzss.mask = rar->dictionary_size - 1;
}
rar->start_new_table = 0;
return (ARCHIVE_OK);
truncated_data:
archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT,
"Truncated RAR file data");
rar->valid = 0;
return (ARCHIVE_FATAL);
}
| 0 |
vim | dc5490e2cbc8c16022a23b449b48c1bd0083f366 | NOT_APPLICABLE | NOT_APPLICABLE | ex_z(exarg_T *eap)
{
char_u *x;
long bigness;
char_u *kind;
int minus = 0;
linenr_T start, end, curs, i;
int j;
linenr_T lnum = eap->line2;
// Vi compatible: ":z!" uses display height, without a count uses
// 'scroll'
if (eap->forceit)
bigness = Rows - 1;
else if (!ONE_WINDOW)
bigness = curwin->w_height - 3;
else
bigness = curwin->w_p_scr * 2;
if (bigness < 1)
bigness = 1;
x = eap->arg;
kind = x;
if (*kind == '-' || *kind == '+' || *kind == '='
|| *kind == '^' || *kind == '.')
++x;
while (*x == '-' || *x == '+')
++x;
if (*x != 0)
{
if (!VIM_ISDIGIT(*x))
{
emsg(_(e_non_numeric_argument_to_z));
return;
}
else
{
bigness = atol((char *)x);
// bigness could be < 0 if atol(x) overflows.
if (bigness > 2 * curbuf->b_ml.ml_line_count || bigness < 0)
bigness = 2 * curbuf->b_ml.ml_line_count;
p_window = bigness;
if (*kind == '=')
bigness += 2;
}
}
// the number of '-' and '+' multiplies the distance
if (*kind == '-' || *kind == '+')
for (x = kind + 1; *x == *kind; ++x)
;
switch (*kind)
{
case '-':
start = lnum - bigness * (linenr_T)(x - kind) + 1;
end = start + bigness - 1;
curs = end;
break;
case '=':
start = lnum - (bigness + 1) / 2 + 1;
end = lnum + (bigness + 1) / 2 - 1;
curs = lnum;
minus = 1;
break;
case '^':
start = lnum - bigness * 2;
end = lnum - bigness;
curs = lnum - bigness;
break;
case '.':
start = lnum - (bigness + 1) / 2 + 1;
end = lnum + (bigness + 1) / 2 - 1;
curs = end;
break;
default: // '+'
start = lnum;
if (*kind == '+')
start += bigness * (linenr_T)(x - kind - 1) + 1;
else if (eap->addr_count == 0)
++start;
end = start + bigness - 1;
curs = end;
break;
}
if (start < 1)
start = 1;
if (end > curbuf->b_ml.ml_line_count)
end = curbuf->b_ml.ml_line_count;
if (curs > curbuf->b_ml.ml_line_count)
curs = curbuf->b_ml.ml_line_count;
else if (curs < 1)
curs = 1;
for (i = start; i <= end; i++)
{
if (minus && i == lnum)
{
msg_putchar('\n');
for (j = 1; j < Columns; j++)
msg_putchar('-');
}
print_line(i, eap->flags & EXFLAG_NR, eap->flags & EXFLAG_LIST);
if (minus && i == lnum)
{
msg_putchar('\n');
for (j = 1; j < Columns; j++)
msg_putchar('-');
}
}
if (curwin->w_cursor.lnum != curs)
{
curwin->w_cursor.lnum = curs;
curwin->w_cursor.col = 0;
}
ex_no_reprint = TRUE;
} | 0 |
booth | 35bf0b7b048d715f671eb68974fb6b4af6528c67 | NOT_APPLICABLE | NOT_APPLICABLE | static int _lockfile(int mode, int *fdp, pid_t *locked_by)
{
struct flock lock;
int fd, rv;
/* After reboot the directory may not yet exist.
* Try to create it, but ignore errors. */
if (strncmp(cl.lockfile, BOOTH_RUN_DIR,
strlen(BOOTH_RUN_DIR)) == 0)
(void)mkdir(BOOTH_RUN_DIR, 0775);
if (locked_by)
*locked_by = 0;
*fdp = -1;
fd = open(cl.lockfile, mode, 0664);
if (fd < 0)
return errno;
*fdp = fd;
lock.l_type = F_WRLCK;
lock.l_start = 0;
lock.l_whence = SEEK_SET;
lock.l_len = 0;
lock.l_pid = 0;
if (fcntl(fd, F_SETLK, &lock) == 0)
return 0;
rv = errno;
if (locked_by)
if (fcntl(fd, F_GETLK, &lock) == 0)
*locked_by = lock.l_pid;
return rv;
} | 0 |
file-roller | b147281293a8307808475e102a14857055f81631 | NOT_APPLICABLE | NOT_APPLICABLE | paste_from_archive_list_ready_cb (GObject *source_object,
GAsyncResult *result,
gpointer user_data)
{
FrWindow *window = user_data;
GError *error = NULL;
if (! fr_archive_operation_finish (FR_ARCHIVE (source_object), result, &error)) {
_paste_from_archive_operation_completed (window, FR_ACTION_PASTING_FILES, error);
g_error_free (error);
return;
}
fr_archive_action_started (window->priv->copy_from_archive, FR_ACTION_EXTRACTING_FILES);
fr_archive_extract (window->priv->copy_from_archive,
window->priv->clipboard_data->files,
window->priv->clipboard_data->tmp_dir,
NULL,
FALSE,
TRUE,
FALSE,
window->priv->clipboard_data->password,
window->priv->cancellable,
paste_from_archive_extract_ready_cb,
window);
} | 0 |
linux | 973c096f6a85e5b5f2a295126ba6928d9a6afd45 | NOT_APPLICABLE | NOT_APPLICABLE | static int vgacon_switch(struct vc_data *c)
{
int x = c->vc_cols * VGA_FONTWIDTH;
int y = c->vc_rows * c->vc_font.height;
int rows = screen_info.orig_video_lines * vga_default_font_height/
c->vc_font.height;
/*
* We need to save screen size here as it's the only way
* we can spot the screen has been resized and we need to
* set size of freshly allocated screens ourselves.
*/
vga_video_num_columns = c->vc_cols;
vga_video_num_lines = c->vc_rows;
/* We can only copy out the size of the video buffer here,
* otherwise we get into VGA BIOS */
if (!vga_is_gfx) {
scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf,
c->vc_screenbuf_size > vga_vram_size ?
vga_vram_size : c->vc_screenbuf_size);
if ((vgacon_xres != x || vgacon_yres != y) &&
(!(vga_video_num_columns % 2) &&
vga_video_num_columns <= screen_info.orig_video_cols &&
vga_video_num_lines <= rows))
vgacon_doresize(c, c->vc_cols, c->vc_rows);
}
return 0; /* Redrawing not needed */
} | 0 |
libsoup | cbeeb7a0f7f0e8b16f2d382157496f9100218dea | NOT_APPLICABLE | NOT_APPLICABLE | unregister_handler (SoupServerHandler *handler)
{
if (handler->destroy)
handler->destroy (handler->user_data);
} | 0 |
linux | e0b0cb9388642c104838fac100a4af32745621e2 | NOT_APPLICABLE | NOT_APPLICABLE | int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
unsigned long phys_req = virt_to_phys(req);
outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
/*
* The host changes the request as a result of the outl, make sure
* the outl and any reads of the req happen in the correct order.
*/
mb();
return ((struct vmmdev_request_header *)req)->rc;
} | 0 |
ceph | 4c11203122d729c832a645c9e3f5092db4963840 | NOT_APPLICABLE | NOT_APPLICABLE | void ProtocolV1::send_message(Message *m) {
bufferlist bl;
uint64_t f = connection->get_features();
// TODO: Currently not all messages supports reencode like MOSDMap, so here
// only let fast dispatch support messages prepare message
bool can_fast_prepare = messenger->ms_can_fast_dispatch(m);
if (can_fast_prepare) {
prepare_send_message(f, m, bl);
}
std::lock_guard<std::mutex> l(connection->write_lock);
// "features" changes will change the payload encoding
if (can_fast_prepare &&
(can_write == WriteStatus::NOWRITE || connection->get_features() != f)) {
// ensure the correctness of message encoding
bl.clear();
m->clear_payload();
ldout(cct, 5) << __func__ << " clear encoded buffer previous " << f
<< " != " << connection->get_features() << dendl;
}
if (can_write == WriteStatus::CLOSED) {
ldout(cct, 10) << __func__ << " connection closed."
<< " Drop message " << m << dendl;
m->put();
} else {
m->trace.event("async enqueueing message");
out_q[m->get_priority()].emplace_back(std::move(bl), m);
ldout(cct, 15) << __func__ << " inline write is denied, reschedule m=" << m
<< dendl;
if (can_write != WriteStatus::REPLACING && !write_in_progress) {
write_in_progress = true;
connection->center->dispatch_event_external(connection->write_handler);
}
}
} | 0 |
Chrome | 6ed26f014f76f10e76e80636027a2db9dcbe1664 | NOT_APPLICABLE | NOT_APPLICABLE | String BaseRenderingContext2D::filter() const {
return GetState().UnparsedFilter();
}
| 0 |
linux | c95eb3184ea1a3a2551df57190c81da695e2144b | NOT_APPLICABLE | NOT_APPLICABLE | armpmu_read(struct perf_event *event)
{
armpmu_event_update(event);
}
| 0 |
savannah | 18a8f0d9943369449bc4de92d411c78fb08d616c | NOT_APPLICABLE | NOT_APPLICABLE | FT_New_Face_From_FSSpec( FT_Library library,
const FSSpec* spec,
FT_Long face_index,
FT_Face* aface )
{
#if ( __LP64__ ) || ( defined( MAC_OS_X_VERSION_10_5 ) && \
( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 ) )
FT_UNUSED( library );
FT_UNUSED( spec );
FT_UNUSED( face_index );
FT_UNUSED( aface );
return FT_THROW( Unimplemented_Feature );
#else
FSRef ref;
if ( !spec || FSpMakeFSRef( spec, &ref ) != noErr )
return FT_THROW( Invalid_Argument );
else
return FT_New_Face_From_FSRef( library, &ref, face_index, aface );
#endif
}
| 0 |
Chrome | eb4bcacd683a68534bbe2e4d8d6eeafafc7f57ba | NOT_APPLICABLE | NOT_APPLICABLE | bool ResourceDispatcherHostImpl::HandleExternalProtocol(ResourceLoader* loader,
const GURL& url) {
if (!delegate_)
return false;
ResourceRequestInfoImpl* info = loader->GetRequestInfo();
if (!ResourceType::IsFrame(info->GetResourceType()))
return false;
const net::URLRequestJobFactory* job_factory =
info->GetContext()->GetRequestContext()->job_factory();
if (job_factory->IsHandledURL(url))
return false;
delegate_->HandleExternalProtocol(url, info->GetChildID(),
info->GetRouteID());
return true;
}
| 0 |
tensorflow | 8a6e874437670045e6c7dc6154c7412b4a2135e2 | NOT_APPLICABLE | NOT_APPLICABLE | void Compute(OpKernelContext* c) override {
const TensorShape& tl_a_shape = c->input(0).shape();
const TensorShape& tl_b_shape = c->input(1).shape();
OP_REQUIRES(
c, tl_a_shape == tl_b_shape,
errors::InvalidArgument("Incompatible input TensorList tensor shapes: ",
tl_a_shape.DebugString(), " vs. ",
tl_b_shape.DebugString()));
AllocatorAttributes attr;
std::unique_ptr<Tensor> tl_alias = c->forward_input(
0 /*input_index*/, 0 /*output_index*/, DT_VARIANT, tl_a_shape,
DEVICE_MEMORY /* input is always on DEVICE_MEMORY */, attr);
// tl_a may be aliased by tl_alias.
const Tensor& tl_a = c->input(0);
const Tensor& tl_b = c->input(1);
Tensor* output = nullptr;
bool ok_to_alias = tl_alias != nullptr;
if (tl_alias && tl_alias->dtype() == DT_VARIANT &&
tl_alias->NumElements() > 0) {
auto tl_a_t = tl_alias->flat<Variant>();
for (int64 i = 0; i < tl_alias->NumElements(); ++i) {
TensorList* aliased = tl_a_t(i).get<TensorList>();
if (aliased == nullptr || !aliased->RefCountIsOne()) {
ok_to_alias = false;
break;
}
}
if (ok_to_alias) {
c->set_output(0, *tl_alias);
output = tl_alias.get();
}
}
if (!ok_to_alias) {
// Couldn't alias the entire Tensor. We'll be conservative and not try
// to alias individual batch entries.
attr.set_on_host(true);
OP_REQUIRES_OK(c, c->allocate_output(0, tl_a_shape, &output, attr));
}
auto output_t = output->flat<Variant>();
auto tl_a_t = tl_a.flat<Variant>();
auto tl_b_t = tl_b.flat<Variant>();
for (int64 i = 0; i < tl_a.NumElements(); ++i) {
const TensorList* l_a = tl_a_t(i).get<TensorList>();
const TensorList* l_b = tl_b_t(i).get<TensorList>();
OP_REQUIRES(
c, l_a != nullptr,
errors::InvalidArgument("input_a is not a TensorList at index ", i,
". Saw: '", tl_a_t(i).DebugString(), "'"));
OP_REQUIRES(
c, l_b != nullptr,
errors::InvalidArgument("input_b is not a TensorList at index ", i,
". Saw: '", tl_b_t(i).DebugString(), "'"));
OP_REQUIRES(c, l_a->element_dtype == element_dtype_,
errors::InvalidArgument(
"input_a[", i, "].dtype != element_dtype. Saw: ",
DataTypeString(l_a->element_dtype), " vs. ",
DataTypeString(element_dtype_)));
OP_REQUIRES(c, l_b->element_dtype == element_dtype_,
errors::InvalidArgument(
"input_b[", i, "].dtype != element_dtype. Saw: ",
DataTypeString(l_b->element_dtype), " vs. ",
DataTypeString(element_dtype_)));
OP_REQUIRES(c, l_a->element_shape.IsIdenticalTo(l_b->element_shape),
errors::InvalidArgument(
"input_a and input_b TensorList element shapes are not "
"identical at index ",
i, ". Saw ", l_a->element_shape.DebugString(), " vs. ",
l_b->element_shape.DebugString()));
if (ok_to_alias) {
TensorList* out = output_t(i).get<TensorList>();
std::copy(l_b->tensors().begin(), l_b->tensors().end(),
std::back_inserter(out->tensors()));
} else {
TensorList out = l_a->Copy();
std::copy(l_b->tensors().begin(), l_b->tensors().end(),
std::back_inserter(out.tensors()));
output_t(i) = std::move(out);
}
}
} | 0 |
FFmpeg | e43a0a232dbf6d3c161823c2e07c52e76227a1bc | NOT_APPLICABLE | NOT_APPLICABLE | static av_cold int lutrgb_init(AVFilterContext *ctx)
{
LutContext *s = ctx->priv;
s->is_rgb = 1;
return 0;
}
| 0 |
gpac | 3dbe11b37d65c8472faf0654410068e5500b3adb | NOT_APPLICABLE | NOT_APPLICABLE | GF_Err tbox_box_size(GF_Box *s)
{
s->size += 8;
return GF_OK;
} | 0 |
linux | d2ffed5185df9d8d9ccd150e4340e3b6f96a8381 | NOT_APPLICABLE | NOT_APPLICABLE | amba_ahb_device_add(struct device *parent, const char *name,
resource_size_t base, size_t size, int irq1, int irq2,
void *pdata, unsigned int periphid)
{
return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
periphid, ~0ULL, &iomem_resource);
} | 0 |
Android | d44e5bde18a41beda39d49189bef7f2ba7c8f3cb | NOT_APPLICABLE | NOT_APPLICABLE | static void ToColor_S4444_Opaque(SkColor dst[], const void* src, int width,
SkColorTable*) {
SkASSERT(width > 0);
const SkPMColor16* s = (const SkPMColor16*)src;
do {
SkPMColor c = SkPixel4444ToPixel32(*s++);
*dst++ = SkColorSetRGB(SkGetPackedR32(c), SkGetPackedG32(c),
SkGetPackedB32(c));
} while (--width != 0);
}
| 0 |
php | 4435b9142ff9813845d5c97ab29a5d637bedb257 | NOT_APPLICABLE | NOT_APPLICABLE | PHP_FUNCTION(imagecolorclosestalpha)
{
zval *IM;
long red, green, blue, alpha;
gdImagePtr im;
if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "rllll", &IM, &red, &green, &blue, &alpha) == FAILURE) {
return;
}
ZEND_FETCH_RESOURCE(im, gdImagePtr, &IM, -1, "Image", le_gd);
RETURN_LONG(gdImageColorClosestAlpha(im, red, green, blue, alpha));
}
| 0 |
linux | 451a2886b6bf90e2fb378f7c46c655450fb96e81 | NOT_APPLICABLE | NOT_APPLICABLE | static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
Sg_device *sdp;
struct scsi_device *scsidp;
unsigned long iflags;
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
if ((NULL == sdp) || (NULL == sdp->device) ||
(atomic_read(&sdp->detaching)))
seq_puts(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
else {
scsidp = sdp->device;
seq_printf(s, "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n",
scsidp->host->host_no, scsidp->channel,
scsidp->id, scsidp->lun, (int) scsidp->type,
1,
(int) scsidp->queue_depth,
(int) atomic_read(&scsidp->device_busy),
(int) scsi_device_online(scsidp));
}
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
}
| 0 |
spice | 95a0cfac8a1c8eff50f05e65df945da3bb501fc9 | NOT_APPLICABLE | NOT_APPLICABLE | static char *red_stream_get_remote_address(RedStream *stream)
{
return addr_to_string("%s;%s", &stream->priv->info->paddr_ext,
stream->priv->info->plen_ext);
} | 0 |
vim | 156d3911952d73b03d7420dc3540215247db0fe8 | NOT_APPLICABLE | NOT_APPLICABLE | spell_suggest(int count)
{
char_u *line;
pos_T prev_cursor = curwin->w_cursor;
char_u wcopy[MAXWLEN + 2];
char_u *p;
int i;
int c;
suginfo_T sug;
suggest_T *stp;
int mouse_used;
int need_cap;
int limit;
int selected = count;
int badlen = 0;
int msg_scroll_save = msg_scroll;
int wo_spell_save = curwin->w_p_spell;
if (!curwin->w_p_spell)
{
did_set_spelllang(curwin);
curwin->w_p_spell = TRUE;
}
if (*curwin->w_s->b_p_spl == NUL)
{
emsg(_(e_spell_checking_is_not_possible));
return;
}
if (VIsual_active)
{
// Use the Visually selected text as the bad word. But reject
// a multi-line selection.
if (curwin->w_cursor.lnum != VIsual.lnum)
{
vim_beep(BO_SPELL);
return;
}
badlen = (int)curwin->w_cursor.col - (int)VIsual.col;
if (badlen < 0)
badlen = -badlen;
else
curwin->w_cursor.col = VIsual.col;
++badlen;
end_visual_mode();
// make sure we don't include the NUL at the end of the line
line = ml_get_curline();
if (badlen > (int)STRLEN(line) - (int)curwin->w_cursor.col)
badlen = (int)STRLEN(line) - (int)curwin->w_cursor.col;
}
// Find the start of the badly spelled word.
else if (spell_move_to(curwin, FORWARD, TRUE, TRUE, NULL) == 0
|| curwin->w_cursor.col > prev_cursor.col)
{
// No bad word or it starts after the cursor: use the word under the
// cursor.
curwin->w_cursor = prev_cursor;
line = ml_get_curline();
p = line + curwin->w_cursor.col;
// Backup to before start of word.
while (p > line && spell_iswordp_nmw(p, curwin))
MB_PTR_BACK(line, p);
// Forward to start of word.
while (*p != NUL && !spell_iswordp_nmw(p, curwin))
MB_PTR_ADV(p);
if (!spell_iswordp_nmw(p, curwin)) // No word found.
{
beep_flush();
return;
}
curwin->w_cursor.col = (colnr_T)(p - line);
}
// Get the word and its length.
// Figure out if the word should be capitalised.
need_cap = check_need_cap(curwin->w_cursor.lnum, curwin->w_cursor.col);
// Make a copy of current line since autocommands may free the line.
line = vim_strsave(ml_get_curline());
if (line == NULL)
goto skip;
// Get the list of suggestions. Limit to 'lines' - 2 or the number in
// 'spellsuggest', whatever is smaller.
if (sps_limit > (int)Rows - 2)
limit = (int)Rows - 2;
else
limit = sps_limit;
spell_find_suggest(line + curwin->w_cursor.col, badlen, &sug, limit,
TRUE, need_cap, TRUE);
if (sug.su_ga.ga_len == 0)
msg(_("Sorry, no suggestions"));
else if (count > 0)
{
if (count > sug.su_ga.ga_len)
smsg(_("Sorry, only %ld suggestions"), (long)sug.su_ga.ga_len);
}
else
{
#ifdef FEAT_RIGHTLEFT
// When 'rightleft' is set the list is drawn right-left.
cmdmsg_rl = curwin->w_p_rl;
if (cmdmsg_rl)
msg_col = Columns - 1;
#endif
// List the suggestions.
msg_start();
msg_row = Rows - 1; // for when 'cmdheight' > 1
lines_left = Rows; // avoid more prompt
vim_snprintf((char *)IObuff, IOSIZE, _("Change \"%.*s\" to:"),
sug.su_badlen, sug.su_badptr);
#ifdef FEAT_RIGHTLEFT
if (cmdmsg_rl && STRNCMP(IObuff, "Change", 6) == 0)
{
// And now the rabbit from the high hat: Avoid showing the
// untranslated message rightleft.
vim_snprintf((char *)IObuff, IOSIZE, ":ot \"%.*s\" egnahC",
sug.su_badlen, sug.su_badptr);
}
#endif
msg_puts((char *)IObuff);
msg_clr_eos();
msg_putchar('\n');
msg_scroll = TRUE;
for (i = 0; i < sug.su_ga.ga_len; ++i)
{
stp = &SUG(sug.su_ga, i);
// The suggested word may replace only part of the bad word, add
// the not replaced part.
vim_strncpy(wcopy, stp->st_word, MAXWLEN);
if (sug.su_badlen > stp->st_orglen)
vim_strncpy(wcopy + stp->st_wordlen,
sug.su_badptr + stp->st_orglen,
sug.su_badlen - stp->st_orglen);
vim_snprintf((char *)IObuff, IOSIZE, "%2d", i + 1);
#ifdef FEAT_RIGHTLEFT
if (cmdmsg_rl)
rl_mirror(IObuff);
#endif
msg_puts((char *)IObuff);
vim_snprintf((char *)IObuff, IOSIZE, " \"%s\"", wcopy);
msg_puts((char *)IObuff);
// The word may replace more than "su_badlen".
if (sug.su_badlen < stp->st_orglen)
{
vim_snprintf((char *)IObuff, IOSIZE, _(" < \"%.*s\""),
stp->st_orglen, sug.su_badptr);
msg_puts((char *)IObuff);
}
if (p_verbose > 0)
{
// Add the score.
if (sps_flags & (SPS_DOUBLE | SPS_BEST))
vim_snprintf((char *)IObuff, IOSIZE, " (%s%d - %d)",
stp->st_salscore ? "s " : "",
stp->st_score, stp->st_altscore);
else
vim_snprintf((char *)IObuff, IOSIZE, " (%d)",
stp->st_score);
#ifdef FEAT_RIGHTLEFT
if (cmdmsg_rl)
// Mirror the numbers, but keep the leading space.
rl_mirror(IObuff + 1);
#endif
msg_advance(30);
msg_puts((char *)IObuff);
}
msg_putchar('\n');
}
#ifdef FEAT_RIGHTLEFT
cmdmsg_rl = FALSE;
msg_col = 0;
#endif
// Ask for choice.
selected = prompt_for_number(&mouse_used);
if (mouse_used)
selected -= lines_left;
lines_left = Rows; // avoid more prompt
// don't delay for 'smd' in normal_cmd()
msg_scroll = msg_scroll_save;
}
if (selected > 0 && selected <= sug.su_ga.ga_len && u_save_cursor() == OK)
{
// Save the from and to text for :spellrepall.
VIM_CLEAR(repl_from);
VIM_CLEAR(repl_to);
stp = &SUG(sug.su_ga, selected - 1);
if (sug.su_badlen > stp->st_orglen)
{
// Replacing less than "su_badlen", append the remainder to
// repl_to.
repl_from = vim_strnsave(sug.su_badptr, sug.su_badlen);
vim_snprintf((char *)IObuff, IOSIZE, "%s%.*s", stp->st_word,
sug.su_badlen - stp->st_orglen,
sug.su_badptr + stp->st_orglen);
repl_to = vim_strsave(IObuff);
}
else
{
// Replacing su_badlen or more, use the whole word.
repl_from = vim_strnsave(sug.su_badptr, stp->st_orglen);
repl_to = vim_strsave(stp->st_word);
}
// Replace the word.
p = alloc(STRLEN(line) - stp->st_orglen + stp->st_wordlen + 1);
if (p != NULL)
{
int len_diff = stp->st_wordlen - stp->st_orglen;
c = (int)(sug.su_badptr - line);
mch_memmove(p, line, c);
STRCPY(p + c, stp->st_word);
STRCAT(p, sug.su_badptr + stp->st_orglen);
// For redo we use a change-word command.
ResetRedobuff();
AppendToRedobuff((char_u *)"ciw");
AppendToRedobuffLit(p + c,
stp->st_wordlen + sug.su_badlen - stp->st_orglen);
AppendCharToRedobuff(ESC);
// "p" may be freed here
ml_replace(curwin->w_cursor.lnum, p, FALSE);
curwin->w_cursor.col = c;
changed_bytes(curwin->w_cursor.lnum, c);
if (curbuf->b_has_textprop && len_diff != 0)
adjust_prop_columns(curwin->w_cursor.lnum, c, len_diff,
APC_SUBSTITUTE);
}
}
else
curwin->w_cursor = prev_cursor;
spell_find_cleanup(&sug);
skip:
vim_free(line);
curwin->w_p_spell = wo_spell_save;
} | 0 |
ghostpdl | c9b362ba908ca4b1d7c72663a33229588012d7d9 | NOT_APPLICABLE | NOT_APPLICABLE | gx_default_discard_transparency_layer(gx_device *dev, gs_gstate *pgs)
{
return 0;
} | 0 |
GIMP | edb251a7ef1602d20a5afcbf23f24afb163de63b | NOT_APPLICABLE | NOT_APPLICABLE | int fli_write_color_2(FILE *f, s_fli_header *fli_header, unsigned char *old_cmap, unsigned char *cmap)
{
unsigned long chunkpos;
unsigned short num_packets;
s_fli_chunk chunk;
chunkpos=ftell(f);
fseek(f, chunkpos+8, SEEK_SET);
num_packets=0;
if (old_cmap==NULL) {
unsigned short col_pos;
num_packets=1;
fli_write_char(f, 0); /* skip no color */
fli_write_char(f, 0); /* 256 color */
for (col_pos=0; col_pos<768; col_pos++) {
fli_write_char(f, cmap[col_pos]);
}
} else {
unsigned short cnt_skip, cnt_col, col_pos, col_start;
col_pos=0;
do {
cnt_skip=0;
while ((col_pos<256) && (old_cmap[col_pos*3+0]==cmap[col_pos*3+0]) && (old_cmap[col_pos*3+1]==cmap[col_pos*3+1]) && (old_cmap[col_pos*3+2]==cmap[col_pos*3+2])) {
cnt_skip++; col_pos++;
}
col_start=col_pos*3;
cnt_col=0;
while ((col_pos<256) && !((old_cmap[col_pos*3+0]==cmap[col_pos*3+0]) && (old_cmap[col_pos*3+1]==cmap[col_pos*3+1]) && (old_cmap[col_pos*3+2]==cmap[col_pos*3+2]))) {
cnt_col++; col_pos++;
}
if (cnt_col>0) {
num_packets++;
fli_write_char(f, cnt_skip);
fli_write_char(f, cnt_col);
while (cnt_col>0) {
fli_write_char(f, cmap[col_start++]);
fli_write_char(f, cmap[col_start++]);
fli_write_char(f, cmap[col_start++]);
cnt_col--;
}
}
} while (col_pos<256);
}
if (num_packets>0) {
chunk.size=ftell(f)-chunkpos;
chunk.magic=FLI_COLOR_2;
fseek(f, chunkpos, SEEK_SET);
fli_write_long(f, chunk.size);
fli_write_short(f, chunk.magic);
fli_write_short(f, num_packets);
if (chunk.size & 1) chunk.size++;
fseek(f,chunkpos+chunk.size,SEEK_SET);
return 1;
}
fseek(f,chunkpos, SEEK_SET);
return 0;
} | 0 |
linux | 550fd08c2cebad61c548def135f67aba284c6162 | NOT_APPLICABLE | NOT_APPLICABLE | static int proc_close( struct inode *inode, struct file *file )
{
struct proc_data *data = file->private_data;
if (data->on_close != NULL)
data->on_close(inode, file);
kfree(data->rbuffer);
kfree(data->wbuffer);
kfree(data);
return 0;
}
| 0 |
linux-2.6 | ee6f958291e2a768fd727e7a67badfff0b67711a | NOT_APPLICABLE | NOT_APPLICABLE | static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
struct list_head *next = p->mnt_mounts.next;
if (next == &p->mnt_mounts) {
while (1) {
if (p == root)
return NULL;
next = p->mnt_child.next;
if (next != &p->mnt_parent->mnt_mounts)
break;
p = p->mnt_parent;
}
}
return list_entry(next, struct vfsmount, mnt_child);
} | 0 |
linux | 0c319d3a144d4b8f1ea2047fd614d2149b68f889 | NOT_APPLICABLE | NOT_APPLICABLE | nvmet_fc_free_tgtport(struct kref *ref)
{
struct nvmet_fc_tgtport *tgtport =
container_of(ref, struct nvmet_fc_tgtport, ref);
struct device *dev = tgtport->dev;
unsigned long flags;
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
list_del(&tgtport->tgt_list);
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
nvmet_fc_free_ls_iodlist(tgtport);
/* let the LLDD know we've finished tearing it down */
tgtport->ops->targetport_delete(&tgtport->fc_target_port);
ida_simple_remove(&nvmet_fc_tgtport_cnt,
tgtport->fc_target_port.port_num);
ida_destroy(&tgtport->assoc_cnt);
kfree(tgtport);
put_device(dev);
}
| 0 |
gdm | 4e6e5335d29c039bed820c43bfd1c19cb62539ff | NOT_APPLICABLE | NOT_APPLICABLE | gdm_display_get_seat_id (GdmDisplay *self,
char **seat_id,
GError **error)
{
GdmDisplayPrivate *priv;
g_return_val_if_fail (GDM_IS_DISPLAY (self), FALSE);
priv = gdm_display_get_instance_private (self);
if (seat_id != NULL) {
*seat_id = g_strdup (priv->seat_id);
}
return TRUE;
} | 0 |
ImageMagick | fc6080f1321fd21e86ef916195cc110b05d9effb | NOT_APPLICABLE | NOT_APPLICABLE | MagickExport const char **GetXMLTreeProcessingInstructions(
XMLTreeInfo *xml_info,const char *target)
{
register ssize_t
i;
XMLTreeRoot
*root;
assert(xml_info != (XMLTreeInfo *) NULL);
assert((xml_info->signature == MagickSignature) ||
(((XMLTreeRoot *) xml_info)->signature == MagickSignature));
if (xml_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
root=(XMLTreeRoot *) xml_info;
while (root->root.parent != (XMLTreeInfo *) NULL)
root=(XMLTreeRoot *) root->root.parent;
i=0;
while ((root->processing_instructions[i] != (char **) NULL) &&
(strcmp(root->processing_instructions[i][0],target) != 0))
i++;
if (root->processing_instructions[i] == (char **) NULL)
return((const char **) sentinel);
return((const char **) (root->processing_instructions[i]+1));
}
| 0 |
Chrome | 2440e872debd68ae7c2a8bf9ddb34df2cce378cd | NOT_APPLICABLE | NOT_APPLICABLE | HRESULT CGaiaCredentialBase::ReportResult(
NTSTATUS status,
NTSTATUS substatus,
wchar_t** ppszOptionalStatusText,
CREDENTIAL_PROVIDER_STATUS_ICON* pcpsiOptionalStatusIcon) {
LOGFN(INFO) << "status=" << putHR(status)
<< " substatus=" << putHR(substatus);
if (status == STATUS_SUCCESS && authentication_results_) {
authentication_results_->SetKey(
kKeySID, base::Value(base::UTF16ToUTF8((BSTR)user_sid_)));
authentication_results_->SetKey(
kKeyDomain, base::Value(base::UTF16ToUTF8((BSTR)domain_)));
authentication_results_->SetKey(
kKeyUsername, base::Value(base::UTF16ToUTF8((BSTR)username_)));
authentication_results_->SetKey(
kKeyPassword, base::Value(base::UTF16ToUTF8((BSTR)password_)));
CComBSTR status_text;
HRESULT hr = ForkSaveAccountInfoStub(authentication_results_, &status_text);
if (FAILED(hr))
LOGFN(ERROR) << "ForkSaveAccountInfoStub hr=" << putHR(hr);
}
*ppszOptionalStatusText = nullptr;
*pcpsiOptionalStatusIcon = CPSI_NONE;
ResetInternalState();
return S_OK;
}
| 0 |
libXi | 19a9cd607de73947fcfb104682f203ffe4e1f4e5 | CVE-2016-7946 | CWE-284 | XGetDeviceControl(
register Display *dpy,
XDevice *dev,
int control)
{
XDeviceControl *Device = NULL;
XDeviceControl *Sav = NULL;
xDeviceState *d = NULL;
xDeviceState *sav = NULL;
xGetDeviceControlReq *req;
xGetDeviceControlReply rep;
XExtDisplayInfo *info = XInput_find_display(dpy);
LockDisplay(dpy);
if (_XiCheckExtInit(dpy, XInput_Add_XChangeDeviceControl, info) == -1)
return NULL;
GetReq(GetDeviceControl, req);
req->reqType = info->codes->major_opcode;
req->ReqType = X_GetDeviceControl;
req->deviceid = dev->device_id;
req->control = control;
if (!_XReply(dpy, (xReply *) & rep, 0, xFalse))
goto out;
if (rep.length > 0) {
unsigned long nbytes;
size_t size = 0;
if (rep.length < (INT_MAX >> 2)) {
nbytes = (unsigned long) rep.length << 2;
d = Xmalloc(nbytes);
}
if (!d) {
_XEatDataWords(dpy, rep.length);
goto out;
}
}
sav = d;
_XRead(dpy, (char *)d, nbytes);
/* In theory, we should just be able to use d->length to get the size.
* Turns out that a number of X servers (up to and including server
* 1.4) sent the wrong length value down the wire. So to not break
* apps that run against older servers, we have to calculate the size
* manually.
*/
switch (d->control) {
case DEVICE_RESOLUTION:
{
xDeviceResolutionState *r;
size_t val_size;
r = (xDeviceResolutionState *) d;
if (r->num_valuators >= (INT_MAX / (3 * sizeof(int))))
goto out;
val_size = 3 * sizeof(int) * r->num_valuators;
if ((sizeof(xDeviceResolutionState) + val_size) > nbytes)
goto out;
size = sizeof(XDeviceResolutionState) + val_size;
break;
}
case DEVICE_ABS_CALIB:
{
if (sizeof(xDeviceAbsCalibState) > nbytes)
goto out;
size = sizeof(XDeviceAbsCalibState);
break;
}
case DEVICE_ABS_AREA:
{
if (sizeof(xDeviceAbsAreaState) > nbytes)
goto out;
size = sizeof(XDeviceAbsAreaState);
break;
}
case DEVICE_CORE:
{
if (sizeof(xDeviceCoreState) > nbytes)
goto out;
size = sizeof(XDeviceCoreState);
break;
}
default:
if (d->length > nbytes)
goto out;
size = d->length;
break;
}
Device = Xmalloc(size);
if (!Device)
goto out;
Sav = Device;
d = sav;
switch (control) {
case DEVICE_RESOLUTION:
{
int *iptr, *iptr2;
xDeviceResolutionState *r;
XDeviceResolutionState *R;
unsigned int i;
r = (xDeviceResolutionState *) d;
R = (XDeviceResolutionState *) Device;
R->control = DEVICE_RESOLUTION;
R->length = sizeof(XDeviceResolutionState);
R->num_valuators = r->num_valuators;
iptr = (int *)(R + 1);
iptr2 = (int *)(r + 1);
R->resolutions = iptr;
R->min_resolutions = iptr + R->num_valuators;
R->max_resolutions = iptr + (2 * R->num_valuators);
for (i = 0; i < (3 * R->num_valuators); i++)
*iptr++ = *iptr2++;
break;
}
case DEVICE_ABS_CALIB:
{
xDeviceAbsCalibState *c = (xDeviceAbsCalibState *) d;
XDeviceAbsCalibState *C = (XDeviceAbsCalibState *) Device;
C->control = DEVICE_ABS_CALIB;
C->length = sizeof(XDeviceAbsCalibState);
C->min_x = c->min_x;
C->max_x = c->max_x;
C->min_y = c->min_y;
C->max_y = c->max_y;
C->flip_x = c->flip_x;
C->flip_y = c->flip_y;
C->rotation = c->rotation;
C->button_threshold = c->button_threshold;
break;
}
case DEVICE_ABS_AREA:
{
xDeviceAbsAreaState *a = (xDeviceAbsAreaState *) d;
XDeviceAbsAreaState *A = (XDeviceAbsAreaState *) Device;
A->control = DEVICE_ABS_AREA;
A->length = sizeof(XDeviceAbsAreaState);
A->offset_x = a->offset_x;
A->offset_y = a->offset_y;
A->width = a->width;
A->height = a->height;
A->screen = a->screen;
A->following = a->following;
break;
}
case DEVICE_CORE:
{
xDeviceCoreState *c = (xDeviceCoreState *) d;
XDeviceCoreState *C = (XDeviceCoreState *) Device;
C->control = DEVICE_CORE;
C->length = sizeof(XDeviceCoreState);
C->status = c->status;
C->iscore = c->iscore;
break;
}
case DEVICE_ENABLE:
{
xDeviceEnableState *e = (xDeviceEnableState *) d;
XDeviceEnableState *E = (XDeviceEnableState *) Device;
E->control = DEVICE_ENABLE;
E->length = sizeof(E);
E->enable = e->enable;
break;
}
default:
break;
}
}
UnlockDisplay(dpy);
SyncHandle();
return (Sav);
out:
UnlockDisplay(dpy);
SyncHandle();
return (XDeviceControl *) NULL;
}
| 1 |
linux | 50220dead1650609206efe91f0cc116132d59b3f | NOT_APPLICABLE | NOT_APPLICABLE | void hid_destroy_device(struct hid_device *hdev)
{
hid_remove_device(hdev);
put_device(&hdev->dev);
}
| 0 |
ghostscript | 1e03c06456d997435019fb3526fa2d4be7dbc6ec | NOT_APPLICABLE | NOT_APPLICABLE | pdf_dict_finds(fz_context *ctx, pdf_obj *obj, const char *key)
{
int len = DICT(obj)->len;
if ((obj->flags & PDF_FLAGS_SORTED) && len > 0)
{
int l = 0;
int r = len - 1;
if (strcmp(pdf_to_name(ctx, DICT(obj)->items[r].k), key) < 0)
{
return -1 - (r+1);
}
while (l <= r)
{
int m = (l + r) >> 1;
int c = -strcmp(pdf_to_name(ctx, DICT(obj)->items[m].k), key);
if (c < 0)
r = m - 1;
else if (c > 0)
l = m + 1;
else
return m;
}
return -1 - l;
}
else
{
int i;
for (i = 0; i < len; i++)
if (strcmp(pdf_to_name(ctx, DICT(obj)->items[i].k), key) == 0)
return i;
return -1 - len;
}
}
| 0 |
linux | cf01fb9985e8deb25ccf0ea54d916b8871ae0e62 | NOT_APPLICABLE | NOT_APPLICABLE | void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
{
char *p = buffer;
nodemask_t nodes = NODE_MASK_NONE;
unsigned short mode = MPOL_DEFAULT;
unsigned short flags = 0;
if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
mode = pol->mode;
flags = pol->flags;
}
switch (mode) {
case MPOL_DEFAULT:
break;
case MPOL_PREFERRED:
if (flags & MPOL_F_LOCAL)
mode = MPOL_LOCAL;
else
node_set(pol->v.preferred_node, nodes);
break;
case MPOL_BIND:
case MPOL_INTERLEAVE:
nodes = pol->v.nodes;
break;
default:
WARN_ON_ONCE(1);
snprintf(p, maxlen, "unknown");
return;
}
p += snprintf(p, maxlen, "%s", policy_modes[mode]);
if (flags & MPOL_MODE_FLAGS) {
p += snprintf(p, buffer + maxlen - p, "=");
/*
* Currently, the only defined flags are mutually exclusive
*/
if (flags & MPOL_F_STATIC_NODES)
p += snprintf(p, buffer + maxlen - p, "static");
else if (flags & MPOL_F_RELATIVE_NODES)
p += snprintf(p, buffer + maxlen - p, "relative");
}
if (!nodes_empty(nodes))
p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
nodemask_pr_args(&nodes));
}
| 0 |
linux | d9d4b1e46d9543a82c23f6df03f4ad697dab361b | NOT_APPLICABLE | NOT_APPLICABLE | static int sony_leds_init(struct sony_sc *sc)
{
struct hid_device *hdev = sc->hdev;
int n, ret = 0;
int use_ds4_names;
struct led_classdev *led;
size_t name_sz;
char *name;
size_t name_len;
const char *name_fmt;
static const char * const ds4_name_str[] = { "red", "green", "blue",
"global" };
u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
u8 use_hw_blink[MAX_LEDS] = { 0 };
BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
if (sc->quirks & BUZZ_CONTROLLER) {
sc->led_count = 4;
use_ds4_names = 0;
name_len = strlen("::buzz#");
name_fmt = "%s::buzz%d";
/* Validate expected report characteristics. */
if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
return -ENODEV;
} else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
dualshock4_set_leds_from_id(sc);
sc->led_state[3] = 1;
sc->led_count = 4;
memset(max_brightness, 255, 3);
use_hw_blink[3] = 1;
use_ds4_names = 1;
name_len = 0;
name_fmt = "%s:%s";
} else if (sc->quirks & MOTION_CONTROLLER) {
sc->led_count = 3;
memset(max_brightness, 255, 3);
use_ds4_names = 1;
name_len = 0;
name_fmt = "%s:%s";
} else if (sc->quirks & NAVIGATION_CONTROLLER) {
static const u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00};
memcpy(sc->led_state, navigation_leds, sizeof(navigation_leds));
sc->led_count = 1;
memset(use_hw_blink, 1, 4);
use_ds4_names = 0;
name_len = strlen("::sony#");
name_fmt = "%s::sony%d";
} else {
sixaxis_set_leds_from_id(sc);
sc->led_count = 4;
memset(use_hw_blink, 1, 4);
use_ds4_names = 0;
name_len = strlen("::sony#");
name_fmt = "%s::sony%d";
}
/*
* Clear LEDs as we have no way of reading their initial state. This is
* only relevant if the driver is loaded after somebody actively set the
* LEDs to on
*/
sony_set_leds(sc);
name_sz = strlen(dev_name(&hdev->dev)) + name_len + 1;
for (n = 0; n < sc->led_count; n++) {
if (use_ds4_names)
name_sz = strlen(dev_name(&hdev->dev)) + strlen(ds4_name_str[n]) + 2;
led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev) + name_sz, GFP_KERNEL);
if (!led) {
hid_err(hdev, "Couldn't allocate memory for LED %d\n", n);
return -ENOMEM;
}
name = (void *)(&led[1]);
if (use_ds4_names)
snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev),
ds4_name_str[n]);
else
snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), n + 1);
led->name = name;
led->brightness = sc->led_state[n];
led->max_brightness = max_brightness[n];
led->flags = LED_CORE_SUSPENDRESUME;
led->brightness_get = sony_led_get_brightness;
led->brightness_set = sony_led_set_brightness;
if (use_hw_blink[n])
led->blink_set = sony_led_blink_set;
sc->leds[n] = led;
ret = devm_led_classdev_register(&hdev->dev, led);
if (ret) {
hid_err(hdev, "Failed to register LED %d\n", n);
return ret;
}
}
return 0;
} | 0 |
audiofile | c48e4c6503f7dabd41f11d4c9c7b7f8960e7f2c0 | NOT_APPLICABLE | NOT_APPLICABLE | bool WAVEFile::isInstrumentParameterValid(AUpvlist list, int i)
{
int param, type;
AUpvgetparam(list, i, ¶m);
AUpvgetvaltype(list, i, &type);
if (type != AU_PVTYPE_LONG)
return false;
long lval;
AUpvgetval(list, i, &lval);
switch (param)
{
case AF_INST_MIDI_BASENOTE:
return ((lval >= 0) && (lval <= 127));
case AF_INST_NUMCENTS_DETUNE:
return ((lval >= -50) && (lval <= 50));
case AF_INST_MIDI_LOVELOCITY:
return ((lval >= 1) && (lval <= 127));
case AF_INST_MIDI_HIVELOCITY:
return ((lval >= 1) && (lval <= 127));
case AF_INST_MIDI_LONOTE:
return ((lval >= 0) && (lval <= 127));
case AF_INST_MIDI_HINOTE:
return ((lval >= 0) && (lval <= 127));
case AF_INST_NUMDBS_GAIN:
return true;
default:
return false;
}
return true;
} | 0 |
tensorflow | 8c6f391a2282684a25cbfec7687bd5d35261a209 | NOT_APPLICABLE | NOT_APPLICABLE | inline LutOutT lut_lookup_with_interpolation(int16_t value,
const LutOutT* lut) {
static_assert(std::is_same<LutOutT, int8_t>::value ||
std::is_same<LutOutT, int16_t>::value,
"Only LUTs with int8 or int16 outputs are supported.");
// 512 base values, lut[513] is only used to calculate the slope
const uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
assert(index < 512 && "LUT index out of range.");
const int16_t offset = value & 0x7f;
// Base and slope are Q0.x
const LutOutT base = lut[index];
const LutOutT slope = lut[index + 1] - lut[index];
// Q0.x * Q0.7 = Q0.(x + 7)
// Round and convert from Q0.(x + 7) to Q0.x
const int delta = (slope * offset + 64) >> 7;
// Q0.15 + Q0.15
return static_cast<LutOutT>(base + delta);
} | 0 |
linux-2.6 | 59839dfff5eabca01cc4e20b45797a60a80af8cb | NOT_APPLICABLE | NOT_APPLICABLE | int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data;
switch (msr) {
case 0xc0010010: /* SYSCFG */
case 0xc0010015: /* HWCR */
case MSR_IA32_PLATFORM_ID:
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MC0_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MCG_CAP:
case MSR_IA32_MCG_CTL:
case MSR_IA32_MC0_MISC:
case MSR_IA32_MC0_MISC+4:
case MSR_IA32_MC0_MISC+8:
case MSR_IA32_MC0_MISC+12:
case MSR_IA32_MC0_MISC+16:
case MSR_IA32_MC0_MISC+20:
case MSR_IA32_UCODE_REV:
case MSR_IA32_EBL_CR_POWERON:
case MSR_IA32_DEBUGCTLMSR:
case MSR_IA32_LASTBRANCHFROMIP:
case MSR_IA32_LASTBRANCHTOIP:
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
case MSR_VM_HSAVE_PA:
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
data = 0;
break;
case MSR_MTRRcap:
data = 0x500 | KVM_NR_VAR_MTRR;
break;
case 0x200 ... 0x2ff:
return get_msr_mtrr(vcpu, msr, pdata);
case 0xcd: /* fsb frequency */
data = 3;
break;
case MSR_IA32_APICBASE:
data = kvm_get_apic_base(vcpu);
break;
case MSR_IA32_MISC_ENABLE:
data = vcpu->arch.ia32_misc_enable_msr;
break;
case MSR_IA32_PERF_STATUS:
/* TSC increment by tick */
data = 1000ULL;
/* CPU multiplier */
data |= (((uint64_t)4ULL) << 40);
break;
case MSR_EFER:
data = vcpu->arch.shadow_efer;
break;
case MSR_KVM_WALL_CLOCK:
data = vcpu->kvm->arch.wall_clock;
break;
case MSR_KVM_SYSTEM_TIME:
data = vcpu->arch.time;
break;
default:
pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
return 1;
}
*pdata = data;
return 0;
} | 0 |
unbound | 6c3a0b54ed8ace93d5b5ca7b8078dc87e75cd640 | NOT_APPLICABLE | NOT_APPLICABLE | struct edns_option* edns_opt_copy_region(struct edns_option* list,
struct regional* region)
{
struct edns_option* result = NULL, *cur = NULL, *s;
while(list) {
/* copy edns option structure */
s = regional_alloc_init(region, list, sizeof(*list));
if(!s) return NULL;
s->next = NULL;
/* copy option data */
if(s->opt_data) {
s->opt_data = regional_alloc_init(region, s->opt_data,
s->opt_len);
if(!s->opt_data)
return NULL;
}
/* link into list */
if(cur)
cur->next = s;
else result = s;
cur = s;
/* examine next element */
list = list->next;
}
return result;
} | 0 |
qemu | e3737b820b45e54b059656dc3f914f895ac7a88b | NOT_APPLICABLE | NOT_APPLICABLE | static void bochs_close(BlockDriverState *bs)
{
BDRVBochsState *s = bs->opaque;
g_free(s->catalog_bitmap);
}
| 0 |
linux | 384632e67e0829deb8015ee6ad916b180049d252 | CVE-2017-15126 | CWE-416 | static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
struct userfaultfd_wait_queue *ewq)
{
if (WARN_ON_ONCE(current->flags & PF_EXITING))
goto out;
ewq->ctx = ctx;
init_waitqueue_entry(&ewq->wq, current);
spin_lock(&ctx->event_wqh.lock);
/*
* After the __add_wait_queue the uwq is visible to userland
* through poll/read().
*/
__add_wait_queue(&ctx->event_wqh, &ewq->wq);
for (;;) {
set_current_state(TASK_KILLABLE);
if (ewq->msg.event == 0)
break;
if (ACCESS_ONCE(ctx->released) ||
fatal_signal_pending(current)) {
__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
if (ewq->msg.event == UFFD_EVENT_FORK) {
struct userfaultfd_ctx *new;
new = (struct userfaultfd_ctx *)
(unsigned long)
ewq->msg.arg.reserved.reserved1;
userfaultfd_ctx_put(new);
}
break;
}
spin_unlock(&ctx->event_wqh.lock);
wake_up_poll(&ctx->fd_wqh, POLLIN);
schedule();
spin_lock(&ctx->event_wqh.lock);
}
__set_current_state(TASK_RUNNING);
spin_unlock(&ctx->event_wqh.lock);
/*
* ctx may go away after this if the userfault pseudo fd is
* already released.
*/
out:
userfaultfd_ctx_put(ctx);
}
| 1 |
Chrome | 4d17163f4b66be517dc49019a029e5ddbd45078c | NOT_APPLICABLE | NOT_APPLICABLE | PassRefPtr<RenderStyle> StyleResolver::styleForDocument(Document& document, CSSFontSelector* fontSelector)
{
const Frame* frame = document.frame();
RefPtr<RenderStyle> documentStyle = RenderStyle::create();
bool seamlessWithParent = document.shouldDisplaySeamlesslyWithParent();
if (seamlessWithParent) {
RenderStyle* iframeStyle = document.seamlessParentIFrame()->renderStyle();
if (iframeStyle)
documentStyle->inheritFrom(iframeStyle);
}
documentStyle->setDisplay(BLOCK);
if (!seamlessWithParent) {
documentStyle->setRTLOrdering(document.visuallyOrdered() ? VisualOrder : LogicalOrder);
documentStyle->setZoom(frame && !document.printing() ? frame->pageZoomFactor() : 1);
documentStyle->setLocale(document.contentLanguage());
}
documentStyle->setUserModify(document.inDesignMode() ? READ_WRITE : READ_ONLY);
document.setStyleDependentState(documentStyle.get());
return documentStyle.release();
}
| 0 |
linux | d3bd7413e0ca40b60cf60d4003246d067cafdeda | CVE-2019-7308 | CWE-189 | static int sanitize_ptr_alu(struct bpf_verifier_env *env,
struct bpf_insn *insn,
const struct bpf_reg_state *ptr_reg,
struct bpf_reg_state *dst_reg,
bool off_is_neg)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_insn_aux_data *aux = cur_aux(env);
bool ptr_is_dst_reg = ptr_reg == dst_reg;
u8 opcode = BPF_OP(insn->code);
u32 alu_state, alu_limit;
struct bpf_reg_state tmp;
bool ret;
if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
return 0;
/* We already marked aux for masking from non-speculative
* paths, thus we got here in the first place. We only care
* to explore bad access from here.
*/
if (vstate->speculative)
goto do_sim;
alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
alu_state |= ptr_is_dst_reg ?
BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
return 0;
/* If we arrived here from different branches with different
* limits to sanitize, then this won't work.
*/
if (aux->alu_state &&
(aux->alu_state != alu_state ||
aux->alu_limit != alu_limit))
return -EACCES;
/* Corresponding fixup done in fixup_bpf_calls(). */
aux->alu_state = alu_state;
aux->alu_limit = alu_limit;
do_sim:
/* Simulate and find potential out-of-bounds access under
* speculative execution from truncation as a result of
* masking when off was not within expected range. If off
* sits in dst, then we temporarily need to move ptr there
* to simulate dst (== 0) +/-= ptr. Needed, for example,
* for cases where we use K-based arithmetic in one direction
* and truncated reg-based in the other in order to explore
* bad access.
*/
if (!ptr_is_dst_reg) {
tmp = *dst_reg;
*dst_reg = *ptr_reg;
}
ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
if (!ptr_is_dst_reg)
*dst_reg = tmp;
return !ret ? -EFAULT : 0;
}
| 1 |
dbus | c3223ba6c401ba81df1305851312a47c485e6cd7 | NOT_APPLICABLE | NOT_APPLICABLE | _dbus_header_set_field_basic (DBusHeader *header,
int field,
int type,
const void *value)
{
_dbus_assert (field <= DBUS_HEADER_FIELD_LAST);
if (!reserve_header_padding (header))
return FALSE;
/* If the field exists we set, otherwise we append */
if (_dbus_header_cache_check (header, field))
{
DBusTypeReader reader;
DBusTypeReader realign_root;
if (!find_field_for_modification (header, field,
&reader, &realign_root))
_dbus_assert_not_reached ("field was marked present in cache but wasn't found");
if (!set_basic_field (&reader, field, type, value, &realign_root))
return FALSE;
}
else
{
DBusTypeWriter writer;
DBusTypeWriter array;
_dbus_type_writer_init_values_only (&writer,
header->byte_order,
&_dbus_header_signature_str,
FIELDS_ARRAY_SIGNATURE_OFFSET,
&header->data,
FIELDS_ARRAY_LENGTH_OFFSET);
/* recurse into array without creating a new length, and jump to
* end of array.
*/
if (!_dbus_type_writer_append_array (&writer,
&_dbus_header_signature_str,
FIELDS_ARRAY_ELEMENT_SIGNATURE_OFFSET,
&array))
_dbus_assert_not_reached ("recurse into ARRAY should not have used memory");
_dbus_assert (array.u.array.len_pos == FIELDS_ARRAY_LENGTH_OFFSET);
_dbus_assert (array.u.array.start_pos == FIRST_FIELD_OFFSET);
_dbus_assert (array.value_pos == HEADER_END_BEFORE_PADDING (header));
if (!write_basic_field (&array,
field, type, value))
return FALSE;
if (!_dbus_type_writer_unrecurse (&writer, &array))
_dbus_assert_not_reached ("unrecurse from ARRAY should not have used memory");
}
correct_header_padding (header);
/* We could be smarter about this (only invalidate fields after the
* one we modified, or even only if the one we modified changed
* length). But this hack is a start.
*/
_dbus_header_cache_invalidate_all (header);
return TRUE;
}
| 0 |
tip | 7bdb157cdebbf95a1cd94ed2e01b338714075d00 | NOT_APPLICABLE | NOT_APPLICABLE | static void perf_event_addr_filters_apply(struct perf_event *event)
{
struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
struct task_struct *task = READ_ONCE(event->ctx->task);
struct perf_addr_filter *filter;
struct mm_struct *mm = NULL;
unsigned int count = 0;
unsigned long flags;
/*
* We may observe TASK_TOMBSTONE, which means that the event tear-down
* will stop on the parent's child_mutex that our caller is also holding
*/
if (task == TASK_TOMBSTONE)
return;
if (ifh->nr_file_filters) {
mm = get_task_mm(event->ctx->task);
if (!mm)
goto restart;
mmap_read_lock(mm);
}
raw_spin_lock_irqsave(&ifh->lock, flags);
list_for_each_entry(filter, &ifh->list, entry) {
if (filter->path.dentry) {
/*
* Adjust base offset if the filter is associated to a
* binary that needs to be mapped:
*/
event->addr_filter_ranges[count].start = 0;
event->addr_filter_ranges[count].size = 0;
perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
} else {
event->addr_filter_ranges[count].start = filter->offset;
event->addr_filter_ranges[count].size = filter->size;
}
count++;
}
event->addr_filters_gen++;
raw_spin_unlock_irqrestore(&ifh->lock, flags);
if (ifh->nr_file_filters) {
mmap_read_unlock(mm);
mmput(mm);
}
restart:
perf_event_stop(event, 1);
} | 0 |
u-boot | 2ac0baab4aff1a0b45067d0b62f00c15f4e86856 | NOT_APPLICABLE | NOT_APPLICABLE | static int sqfs_disk_read(__u32 block, __u32 nr_blocks, void *buf)
{
ulong ret;
if (!ctxt.cur_dev)
return -1;
ret = blk_dread(ctxt.cur_dev, ctxt.cur_part_info.start + block,
nr_blocks, buf);
if (ret != nr_blocks)
return -1;
return ret;
} | 0 |
cyrus-imapd | 621f9e41465b521399f691c241181300fab55995 | NOT_APPLICABLE | NOT_APPLICABLE | EXPORTED void annotatemore_close(void)
{
/* close all the open databases */
while (all_dbs_head)
annotate_closedb(all_dbs_head);
annotatemore_dbopen = 0;
} | 0 |
linux | c444eb564fb16645c172d550359cb3d75fe8a040 | NOT_APPLICABLE | NOT_APPLICABLE | static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
struct page *zero_page)
{
pmd_t entry;
if (!pmd_none(*pmd))
return false;
entry = mk_pmd(zero_page, vma->vm_page_prot);
entry = pmd_mkhuge(entry);
if (pgtable)
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry);
mm_inc_nr_ptes(mm);
return true;
} | 0 |
linux | e4f3aa2e1e67bb48dfbaaf1cad59013d5a5bc276 | NOT_APPLICABLE | NOT_APPLICABLE | static void setup_report_key(struct packet_command *cgc, unsigned agid, unsigned type)
{
cgc->cmd[0] = GPCMD_REPORT_KEY;
cgc->cmd[10] = type | (agid << 6);
switch (type) {
case 0: case 8: case 5: {
cgc->buflen = 8;
break;
}
case 1: {
cgc->buflen = 16;
break;
}
case 2: case 4: {
cgc->buflen = 12;
break;
}
}
cgc->cmd[9] = cgc->buflen;
cgc->data_direction = CGC_DATA_READ;
}
| 0 |
mupdf | b2e7d38e845c7d4922d05e6e41f3a2dc1bc1b14a | NOT_APPLICABLE | NOT_APPLICABLE | static void pdf_run_ET(fz_context *ctx, pdf_processor *proc)
{
pdf_run_processor *pr = (pdf_run_processor *)proc;
pdf_flush_text(ctx, pr);
} | 0 |
openssl | 56f1acf5ef8a432992497a04792ff4b3b2c6f286 | NOT_APPLICABLE | NOT_APPLICABLE | const COMP_METHOD *SSL_get_current_expansion(SSL *s)
{
if (s->expand != NULL)
return (s->expand->meth);
return (NULL);
} | 0 |
linux | 9899d11f654474d2d54ea52ceaa2a1f4db3abd68 | CVE-2013-0871 | CWE-362 | static inline int may_ptrace_stop(void)
{
if (!likely(current->ptrace))
return 0;
/*
* Are we in the middle of do_coredump?
* If so and our tracer is also part of the coredump stopping
* is a deadlock situation, and pointless because our tracer
* is dead so don't allow us to stop.
* If SIGKILL was already sent before the caller unlocked
* ->siglock we must see ->core_state != NULL. Otherwise it
* is safe to enter schedule().
*/
if (unlikely(current->mm->core_state) &&
unlikely(current->mm == current->parent->mm))
return 0;
return 1;
}
| 1 |
Chrome | 5cfe3023574666663d970ce48cdbc8ed15ce61d9 | NOT_APPLICABLE | NOT_APPLICABLE | void AutofillDialogViews::ContentsChanged(views::Textfield* sender,
const base::string16& new_contents) {
InputEditedOrActivated(TypeForTextfield(sender),
sender->GetBoundsInScreen(),
true);
const ExpandingTextfield* expanding = static_cast<ExpandingTextfield*>(
sender->GetAncestorWithClassName(ExpandingTextfield::kViewClassName));
if (expanding && expanding->needs_layout())
ContentsPreferredSizeChanged();
}
| 0 |
Chrome | 385508dc888ef15d272cdd2705b17996abc519d6 | NOT_APPLICABLE | NOT_APPLICABLE | void GLES2DecoderImpl::RestoreTextureState(unsigned service_id) const {
Texture* texture = texture_manager()->GetTextureForServiceId(service_id);
if (texture) {
GLenum target = texture->target();
api()->glBindTextureFn(target, service_id);
api()->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, texture->wrap_s());
api()->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, texture->wrap_t());
api()->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER,
texture->min_filter());
api()->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER,
texture->mag_filter());
if (feature_info_->IsWebGL2OrES3Context()) {
api()->glTexParameteriFn(target, GL_TEXTURE_BASE_LEVEL,
texture->base_level());
}
RestoreTextureUnitBindings(state_.active_texture_unit);
}
}
| 0 |
jasper | e96fc4fdd525fa0ede28074a7e2b1caf94b58b0d | NOT_APPLICABLE | NOT_APPLICABLE | char *jas_stream_gets(jas_stream_t *stream, char *buf, int bufsize)
{
int c;
char *bufptr;
assert(bufsize > 0);
JAS_DBGLOG(100, ("jas_stream_gets(%p, %p, %d)\n", stream, buf, bufsize));
bufptr = buf;
while (bufsize > 1) {
if ((c = jas_stream_getc(stream)) == EOF) {
break;
}
*bufptr++ = c;
--bufsize;
if (c == '\n') {
break;
}
}
*bufptr = '\0';
return buf;
}
| 0 |
u-boot | 8f8c04bf1ebbd2f72f1643e7ad9617dafa6e5409 | NOT_APPLICABLE | NOT_APPLICABLE | static int i2c_get_cur_bus(struct udevice **busp)
{
#ifdef CONFIG_I2C_SET_DEFAULT_BUS_NUM
if (!i2c_cur_bus) {
if (cmd_i2c_set_bus_num(CONFIG_I2C_DEFAULT_BUS_NUMBER)) {
printf("Default I2C bus %d not found\n",
CONFIG_I2C_DEFAULT_BUS_NUMBER);
return -ENODEV;
}
}
#endif
if (!i2c_cur_bus) {
puts("No I2C bus selected\n");
return -ENODEV;
}
*busp = i2c_cur_bus;
return 0;
} | 0 |
gpac | f36525c5beafb78959c3a07d6622c9028de348da | NOT_APPLICABLE | NOT_APPLICABLE | static time_t gf_mktime_utc(struct tm *tm)
{
return timegm(tm);
}
| 0 |
Chrome | 94bb8861ec61b4ebcce8a4489be2cf7e2a055d90 | NOT_APPLICABLE | NOT_APPLICABLE | ConvolverNode::~ConvolverNode()
{
uninitialize();
}
| 0 |
ImageMagick | 94691f00839dbdf43edb1508af945ab19b388573 | NOT_APPLICABLE | NOT_APPLICABLE | MagickExport MagickBooleanType IsPaletteImage(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->storage_class != PseudoClass)
return(MagickFalse);
return((image->colors <= 256) ? MagickTrue : MagickFalse);
} | 0 |
openjpeg | 5d00b719f4b93b1445e6fb4c766b9a9883c57949 | NOT_APPLICABLE | NOT_APPLICABLE | void opj_pi_update_decode_poc (opj_pi_iterator_t * p_pi,
opj_tcp_t * p_tcp,
OPJ_UINT32 p_max_precision,
OPJ_UINT32 p_max_res)
{
/* loop*/
OPJ_UINT32 pino;
/* encoding prameters to set*/
OPJ_UINT32 l_bound;
opj_pi_iterator_t * l_current_pi = 00;
opj_poc_t* l_current_poc = 0;
OPJ_ARG_NOT_USED(p_max_res);
/* preconditions in debug*/
assert(p_pi != 00);
assert(p_tcp != 00);
/* initializations*/
l_bound = p_tcp->numpocs+1;
l_current_pi = p_pi;
l_current_poc = p_tcp->pocs;
for (pino = 0;pino<l_bound;++pino) {
l_current_pi->poc.prg = l_current_poc->prg; /* Progression Order #0 */
l_current_pi->first = 1;
l_current_pi->poc.resno0 = l_current_poc->resno0; /* Resolution Level Index #0 (Start) */
l_current_pi->poc.compno0 = l_current_poc->compno0; /* Component Index #0 (Start) */
l_current_pi->poc.layno0 = 0;
l_current_pi->poc.precno0 = 0;
l_current_pi->poc.resno1 = l_current_poc->resno1; /* Resolution Level Index #0 (End) */
l_current_pi->poc.compno1 = l_current_poc->compno1; /* Component Index #0 (End) */
l_current_pi->poc.layno1 = l_current_poc->layno1; /* Layer Index #0 (End) */
l_current_pi->poc.precno1 = p_max_precision;
++l_current_pi;
++l_current_poc;
}
}
| 0 |
cpython | c3c9db89273fabc62ea1b48389d9a3000c1c03ae | NOT_APPLICABLE | NOT_APPLICABLE | string_istitle(PyStringObject *self, PyObject *uncased)
{
register const unsigned char *p
= (unsigned char *) PyString_AS_STRING(self);
register const unsigned char *e;
int cased, previous_is_cased;
/* Shortcut for single character strings */
if (PyString_GET_SIZE(self) == 1)
return PyBool_FromLong(isupper(*p) != 0);
/* Special case for empty strings */
if (PyString_GET_SIZE(self) == 0)
return PyBool_FromLong(0);
e = p + PyString_GET_SIZE(self);
cased = 0;
previous_is_cased = 0;
for (; p < e; p++) {
register const unsigned char ch = *p;
if (isupper(ch)) {
if (previous_is_cased)
return PyBool_FromLong(0);
previous_is_cased = 1;
cased = 1;
}
else if (islower(ch)) {
if (!previous_is_cased)
return PyBool_FromLong(0);
previous_is_cased = 1;
cased = 1;
}
else
previous_is_cased = 0;
}
return PyBool_FromLong(cased);
} | 0 |
harfbuzz | a6a79df5fe2ed2cd307e7a991346faee164e70d9 | NOT_APPLICABLE | NOT_APPLICABLE | hb_buffer_ensure_separate (hb_buffer_t *buffer, unsigned int size)
{
if (unlikely (!hb_buffer_ensure (buffer, size))) return FALSE;
if (buffer->out_info == buffer->info)
{
assert (buffer->have_output);
buffer->out_info = (hb_internal_glyph_info_t *) buffer->pos;
memcpy (buffer->out_info, buffer->info, buffer->out_len * sizeof (buffer->out_info[0]));
}
return TRUE;
}
| 0 |
Chrome | ce70785c73a2b7cf2b34de0d8439ca31929b4743 | NOT_APPLICABLE | NOT_APPLICABLE | bool LayoutBlockFlow::mustDiscardMarginAfterForChild(const LayoutBox& child) const
{
ASSERT(!child.selfNeedsLayout());
if (!child.isWritingModeRoot())
return child.isLayoutBlockFlow() ? toLayoutBlockFlow(&child)->mustDiscardMarginAfter() : (child.style()->marginAfterCollapse() == MDISCARD);
if (child.isHorizontalWritingMode() == isHorizontalWritingMode())
return child.isLayoutBlockFlow() ? toLayoutBlockFlow(&child)->mustDiscardMarginBefore() : (child.style()->marginBeforeCollapse() == MDISCARD);
return false;
}
| 0 |
w3m | 7b88478227978a8d673b4dd0e05eee410cc33330 | CVE-2016-9432 | CWE-119 | formUpdateBuffer(Anchor *a, Buffer *buf, FormItemList *form)
{
Buffer save;
char *p;
int spos, epos, rows, c_rows, pos, col = 0;
Line *l;
copyBuffer(&save, buf);
gotoLine(buf, a->start.line);
switch (form->type) {
case FORM_TEXTAREA:
case FORM_INPUT_TEXT:
case FORM_INPUT_FILE:
case FORM_INPUT_PASSWORD:
case FORM_INPUT_CHECKBOX:
case FORM_INPUT_RADIO:
#ifdef MENU_SELECT
case FORM_SELECT:
#endif /* MENU_SELECT */
spos = a->start.pos;
epos = a->end.pos;
break;
default:
spos = a->start.pos + 1;
epos = a->end.pos - 1;
}
switch (form->type) {
case FORM_INPUT_CHECKBOX:
case FORM_INPUT_RADIO:
if (form->checked)
buf->currentLine->lineBuf[spos] = '*';
else
buf->currentLine->lineBuf[spos] = ' ';
break;
case FORM_INPUT_TEXT:
case FORM_INPUT_FILE:
case FORM_INPUT_PASSWORD:
case FORM_TEXTAREA:
#ifdef MENU_SELECT
case FORM_SELECT:
if (form->type == FORM_SELECT) {
p = form->label->ptr;
updateSelectOption(form, form->select_option);
}
else
#endif /* MENU_SELECT */
p = form->value->ptr;
l = buf->currentLine;
if (!l)
break;
if (form->type == FORM_TEXTAREA) {
int n = a->y - buf->currentLine->linenumber;
if (n > 0)
for (; l && n; l = l->prev, n--) ;
else if (n < 0)
for (; l && n; l = l->prev, n++) ;
if (!l)
break;
}
rows = form->rows ? form->rows : 1;
col = COLPOS(l, a->start.pos);
for (c_rows = 0; c_rows < rows; c_rows++, l = l->next) {
if (rows > 1) {
pos = columnPos(l, col);
a = retrieveAnchor(buf->formitem, l->linenumber, pos);
if (a == NULL)
break;
spos = a->start.pos;
epos = a->end.pos;
}
if (a->start.line != a->end.line || spos > epos || epos > l->len)
epos = spos;
pos = form_update_line(l, &p, spos, epos, COLPOS(l, epos) - col,
rows > 1,
form->type == FORM_INPUT_PASSWORD);
if (pos != epos) {
shiftAnchorPosition(buf->href, buf->hmarklist,
a->start.line, spos, pos - epos);
shiftAnchorPosition(buf->name, buf->hmarklist,
a->start.line, spos, pos - epos);
shiftAnchorPosition(buf->img, buf->hmarklist,
a->start.line, spos, pos - epos);
shiftAnchorPosition(buf->formitem, buf->hmarklist,
a->start.line, spos, pos - epos);
}
}
break;
}
copyBuffer(buf, &save);
arrangeLine(buf);
} | 1 |
kvm | c73f4c998e1fd4249b9edfa39e23f4fda2b9b041 | NOT_APPLICABLE | NOT_APPLICABLE | static int handle_vmon(struct kvm_vcpu *vcpu)
{
int ret;
gpa_t vmptr;
struct page *page;
struct vcpu_vmx *vmx = to_vmx(vcpu);
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
/*
* The Intel VMX Instruction Reference lists a bunch of bits that are
* prerequisite to running VMXON, most notably cr4.VMXE must be set to
* 1 (see vmx_set_cr4() for when we allow the guest to set this).
* Otherwise, we should fail with #UD. But most faulting conditions
* have already been checked by hardware, prior to the VM-exit for
* VMXON. We do test guest cr4.VMXE because processor CR4 always has
* that bit set to 1 in non-root mode.
*/
if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
/* CPL=0 must be checked manually. */
if (vmx_get_cpl(vcpu)) {
kvm_inject_gp(vcpu, 0);
return 1;
}
if (vmx->nested.vmxon)
return nested_vmx_failValid(vcpu,
VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
!= VMXON_NEEDED_FEATURES) {
kvm_inject_gp(vcpu, 0);
return 1;
}
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
/*
* SDM 3: 24.11.5
* The first 4 bytes of VMXON region contain the supported
* VMCS revision identifier
*
* Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
* which replaces physical address width with 32
*/
if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
return nested_vmx_failInvalid(vcpu);
page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
if (is_error_page(page))
return nested_vmx_failInvalid(vcpu);
if (*(u32 *)kmap(page) != VMCS12_REVISION) {
kunmap(page);
kvm_release_page_clean(page);
return nested_vmx_failInvalid(vcpu);
}
kunmap(page);
kvm_release_page_clean(page);
vmx->nested.vmxon_ptr = vmptr;
ret = enter_vmx_operation(vcpu);
if (ret)
return ret;
return nested_vmx_succeed(vcpu);
} | 0 |
gpac | 9ea93a2ec8f555ceed1ee27294cf94822f14f10f | NOT_APPLICABLE | NOT_APPLICABLE | GF_Err avcc_box_size(GF_Box *s)
{
u32 i, count;
GF_AVCConfigurationBox *ptr = (GF_AVCConfigurationBox *)s;
if (!ptr->config) {
ptr->size = 0;
return GF_OK;
}
ptr->size += 7;
count = gf_list_count(ptr->config->sequenceParameterSets);
for (i=0; i<count; i++)
ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ptr->config->sequenceParameterSets, i))->size;
count = gf_list_count(ptr->config->pictureParameterSets);
for (i=0; i<count; i++)
ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ptr->config->pictureParameterSets, i))->size;
if ((ptr->type==GF_ISOM_BOX_TYPE_AVCC) || (ptr->type==GF_ISOM_BOX_TYPE_AVCE)) {
if (gf_avcc_use_extensions(ptr->config->AVCProfileIndication)) {
ptr->size += 4;
count = ptr->config->sequenceParameterSetExtensions ?gf_list_count(ptr->config->sequenceParameterSetExtensions) : 0;
for (i=0; i<count; i++)
ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ptr->config->sequenceParameterSetExtensions, i))->size;
}
}
return GF_OK;
} | 0 |
gst-plugins-bad | 11353b3f6e2f047cc37483d21e6a37ae558896bc | NOT_APPLICABLE | NOT_APPLICABLE | gst_h264_parse_sps_mvc_data (NalReader * nr, GstH264SPS * sps)
{
GstH264SPSExtMVC *const mvc = &sps->extension.mvc;
guint8 bit_equal_to_one;
guint i, j, k;
READ_UINT8 (nr, bit_equal_to_one, 1);
if (!bit_equal_to_one)
return FALSE;
sps->extension_type = GST_H264_NAL_EXTENSION_MVC;
READ_UE_MAX (nr, mvc->num_views_minus1, GST_H264_MAX_VIEW_COUNT - 1);
mvc->view = g_new0 (GstH264SPSExtMVCView, mvc->num_views_minus1 + 1);
if (!mvc->view)
goto error_allocation_failed;
for (i = 0; i <= mvc->num_views_minus1; i++)
READ_UE_MAX (nr, mvc->view[i].view_id, GST_H264_MAX_VIEW_ID);
for (i = 1; i <= mvc->num_views_minus1; i++) {
/* for RefPicList0 */
READ_UE_MAX (nr, mvc->view[i].num_anchor_refs_l0, 15);
for (j = 0; j < mvc->view[i].num_anchor_refs_l0; j++) {
READ_UE_MAX (nr, mvc->view[i].anchor_ref_l0[j], GST_H264_MAX_VIEW_ID);
}
/* for RefPicList1 */
READ_UE_MAX (nr, mvc->view[i].num_anchor_refs_l1, 15);
for (j = 0; j < mvc->view[i].num_anchor_refs_l1; j++) {
READ_UE_MAX (nr, mvc->view[i].anchor_ref_l1[j], GST_H264_MAX_VIEW_ID);
}
}
for (i = 1; i <= mvc->num_views_minus1; i++) {
/* for RefPicList0 */
READ_UE_MAX (nr, mvc->view[i].num_non_anchor_refs_l0, 15);
for (j = 0; j < mvc->view[i].num_non_anchor_refs_l0; j++) {
READ_UE_MAX (nr, mvc->view[i].non_anchor_ref_l0[j], GST_H264_MAX_VIEW_ID);
}
/* for RefPicList1 */
READ_UE_MAX (nr, mvc->view[i].num_non_anchor_refs_l1, 15);
for (j = 0; j < mvc->view[i].num_non_anchor_refs_l1; j++) {
READ_UE_MAX (nr, mvc->view[i].non_anchor_ref_l1[j], GST_H264_MAX_VIEW_ID);
}
}
READ_UE_MAX (nr, mvc->num_level_values_signalled_minus1, 63);
mvc->level_value =
g_new0 (GstH264SPSExtMVCLevelValue,
mvc->num_level_values_signalled_minus1 + 1);
if (!mvc->level_value)
goto error_allocation_failed;
for (i = 0; i <= mvc->num_level_values_signalled_minus1; i++) {
GstH264SPSExtMVCLevelValue *const level_value = &mvc->level_value[i];
READ_UINT8 (nr, level_value->level_idc, 8);
READ_UE_MAX (nr, level_value->num_applicable_ops_minus1, 1023);
level_value->applicable_op =
g_new0 (GstH264SPSExtMVCLevelValueOp,
level_value->num_applicable_ops_minus1 + 1);
if (!level_value->applicable_op)
goto error_allocation_failed;
for (j = 0; j <= level_value->num_applicable_ops_minus1; j++) {
GstH264SPSExtMVCLevelValueOp *const op = &level_value->applicable_op[j];
READ_UINT8 (nr, op->temporal_id, 3);
READ_UE_MAX (nr, op->num_target_views_minus1, 1023);
op->target_view_id = g_new (guint16, op->num_target_views_minus1 + 1);
if (!op->target_view_id)
goto error_allocation_failed;
for (k = 0; k <= op->num_target_views_minus1; k++)
READ_UE_MAX (nr, op->target_view_id[k], GST_H264_MAX_VIEW_ID);
READ_UE_MAX (nr, op->num_views_minus1, 1023);
}
}
return TRUE;
error_allocation_failed:
GST_WARNING ("failed to allocate memory");
gst_h264_sps_clear (sps);
return FALSE;
error:
gst_h264_sps_clear (sps);
return FALSE;
} | 0 |
libgit2 | 9844d38bed10e9ff17174434b3421b227ae710f3 | NOT_APPLICABLE | NOT_APPLICABLE | static int hdr_sz(
size_t *size,
const unsigned char **delta,
const unsigned char *end)
{
const unsigned char *d = *delta;
size_t r = 0;
unsigned int c, shift = 0;
do {
if (d == end) {
giterr_set(GITERR_INVALID, "truncated delta");
return -1;
}
c = *d++;
r |= (c & 0x7f) << shift;
shift += 7;
} while (c & 0x80);
*delta = d;
*size = r;
return 0;
}
| 0 |
u-boot | master | CVE-2019-13106 | CWE-787 | void board_init_f_init_reserve(ulong base)
{
struct global_data *gd_ptr;
/*
* clear GD entirely and set it up.
* Use gd_ptr, as gd may not be properly set yet.
*/
gd_ptr = (struct global_data *)base;
/* zero the area */
memset(gd_ptr, '\0', sizeof(*gd));
/* set GD unless architecture did it already */
#if !defined(CONFIG_ARM)
arch_setup_gd(gd_ptr);
#endif
if (CONFIG_IS_ENABLED(SYS_REPORT_STACK_F_USAGE))
board_init_f_init_stack_protection_addr(base);
/* next alloc will be higher by one GD plus 16-byte alignment */
base += roundup(sizeof(struct global_data), 16);
/*
* record early malloc arena start.
* Use gd as it is now properly set for all architectures.
*/
#if CONFIG_VAL(SYS_MALLOC_F_LEN)
/* go down one 'early malloc arena' */
gd->malloc_base = base;
/* next alloc will be higher by one 'early malloc arena' size */
base += CONFIG_VAL(SYS_MALLOC_F_LEN);
#endif
if (CONFIG_IS_ENABLED(SYS_REPORT_STACK_F_USAGE))
board_init_f_init_stack_protection();
}
| 1 |
linux | f62f3c20647ebd5fb6ecb8f0b477b9281c44c10a | NOT_APPLICABLE | NOT_APPLICABLE | static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
{
u32 irq;
int rc;
if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
rc = -3;
goto out;
}
irq = be32_to_cpu(args->args[0]);
if (xics_on_xive())
rc = kvmppc_xive_int_on(vcpu->kvm, irq);
else
rc = kvmppc_xics_int_on(vcpu->kvm, irq);
if (rc)
rc = -3;
out:
args->rets[0] = cpu_to_be32(rc);
} | 0 |
git | 7c3745fc6185495d5765628b4dfe1bd2c25a2981 | NOT_APPLICABLE | NOT_APPLICABLE | const char *git_path(const char *fmt, ...)
{
struct strbuf *pathname = get_pathname();
va_list args;
va_start(args, fmt);
do_git_path(the_repository, NULL, pathname, fmt, args);
va_end(args);
return pathname->buf;
} | 0 |
Chrome | 2440e872debd68ae7c2a8bf9ddb34df2cce378cd | NOT_APPLICABLE | NOT_APPLICABLE | bool IsEnrolledWithGoogleMdm(const base::string16& mdm_url) {
switch (g_enrolled_status) {
case EnrolledStatus::kForceTrue:
return true;
case EnrolledStatus::kForceFalse:
return false;
case EnrolledStatus::kDontForce:
break;
}
base::ScopedNativeLibrary library(
base::FilePath(FILE_PATH_LITERAL("MDMRegistration.dll")));
auto get_device_registration_info_function =
GET_MDM_FUNCTION_POINTER(library, GetDeviceRegistrationInfo);
if (!get_device_registration_info_function) {
LOGFN(ERROR) << "GET_MDM_FUNCTION_POINTER(GetDeviceRegistrationInfo)";
return false;
}
MANAGEMENT_REGISTRATION_INFO* info;
HRESULT hr = get_device_registration_info_function(
DeviceRegistrationBasicInfo, reinterpret_cast<void**>(&info));
bool is_enrolled = SUCCEEDED(hr) && info->fDeviceRegisteredWithManagement &&
GURL(mdm_url) == GURL(info->pszMDMServiceUri);
if (SUCCEEDED(hr))
::HeapFree(::GetProcessHeap(), 0, info);
return is_enrolled;
}
| 0 |
Chrome | 504e0c45030f76bffda93f0857e7595216d6e7a4 | NOT_APPLICABLE | NOT_APPLICABLE | bool RcdBetterThan(const std::string& a, const std::string& b) {
if (a == b)
return false;
if (a == "com")
return true;
if (a == "net")
return b != "com";
if (a == "org")
return b != "com" && b != "net";
return false;
}
| 0 |
char-misc | 95a69adab9acfc3981c504737a2b6578e4d846ef | NOT_APPLICABLE | NOT_APPLICABLE | static void kvp_update_mem_state(int pool)
{
FILE *filep;
size_t records_read = 0;
struct kvp_record *record = kvp_file_info[pool].records;
struct kvp_record *readp;
int num_blocks = kvp_file_info[pool].num_blocks;
int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
kvp_acquire_lock(pool);
filep = fopen(kvp_file_info[pool].fname, "r");
if (!filep) {
kvp_release_lock(pool);
syslog(LOG_ERR, "Failed to open file, pool: %d", pool);
exit(EXIT_FAILURE);
}
for (;;) {
readp = &record[records_read];
records_read += fread(readp, sizeof(struct kvp_record),
ENTRIES_PER_BLOCK * num_blocks,
filep);
if (ferror(filep)) {
syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
exit(EXIT_FAILURE);
}
if (!feof(filep)) {
/*
* We have more data to read.
*/
num_blocks++;
record = realloc(record, alloc_unit * num_blocks);
if (record == NULL) {
syslog(LOG_ERR, "malloc failed");
exit(EXIT_FAILURE);
}
continue;
}
break;
}
kvp_file_info[pool].num_blocks = num_blocks;
kvp_file_info[pool].records = record;
kvp_file_info[pool].num_records = records_read;
fclose(filep);
kvp_release_lock(pool);
} | 0 |
pam_radius | 01173ec2426627dbb1e0d96c06c3ffa0b14d36d0 | NOT_APPLICABLE | NOT_APPLICABLE | static int talk_radius(radius_conf_t *conf, AUTH_HDR *request, AUTH_HDR *response,
char *password, char *old_password, int tries)
{
socklen_t salen;
int total_length;
fd_set set;
struct timeval tv;
time_t now, end;
int rcode;
struct sockaddr saremote;
struct sockaddr_in *s_in = (struct sockaddr_in *) &saremote;
radius_server_t *server = conf->server;
int ok;
int server_tries;
int retval;
/* ************************************************************ */
/* Now that we're done building the request, we can send it */
/*
Hmm... on password change requests, all of the found server information
could be saved with a pam_set_data(), which means even the radius_conf_t
information will have to be malloc'd at some point
On the other hand, we could just try all of the servers again in
sequence, on the off chance that one may have ended up fixing itself.
*/
/* loop over all available servers */
while (server != NULL) {
/* clear the response */
memset(response, 0, sizeof(AUTH_HDR));
/* only look up IP information as necessary */
if ((retval = host2server(server)) != PAM_SUCCESS) {
_pam_log(LOG_ERR,
"Failed looking up IP address for RADIUS server %s (errcode=%d)",
server->hostname, retval);
ok = FALSE;
goto next; /* skip to the next server */
}
/* set up per-server IP && port configuration */
memset ((char *) s_in, '\0', sizeof(struct sockaddr));
s_in->sin_family = AF_INET;
s_in->sin_addr.s_addr = htonl(server->ip.s_addr);
s_in->sin_port = server->port;
total_length = ntohs(request->length);
if (!password) { /* make an RFC 2139 p6 request authenticator */
get_accounting_vector(request, server);
}
server_tries = tries;
send:
/* send the packet */
if (sendto(conf->sockfd, (char *) request, total_length, 0,
&saremote, sizeof(struct sockaddr_in)) < 0) {
_pam_log(LOG_ERR, "Error sending RADIUS packet to server %s: %s",
server->hostname, strerror(errno));
ok = FALSE;
goto next; /* skip to the next server */
}
/* ************************************************************ */
/* Wait for the response, and verify it. */
salen = sizeof(struct sockaddr);
tv.tv_sec = server->timeout; /* wait for the specified time */
tv.tv_usec = 0;
FD_ZERO(&set); /* clear out the set */
FD_SET(conf->sockfd, &set); /* wait only for the RADIUS UDP socket */
time(&now);
end = now + tv.tv_sec;
/* loop, waiting for the select to return data */
ok = TRUE;
while (ok) {
rcode = select(conf->sockfd + 1, &set, NULL, NULL, &tv);
/* select timed out */
if (rcode == 0) {
_pam_log(LOG_ERR, "RADIUS server %s failed to respond", server->hostname);
if (--server_tries) {
goto send;
}
ok = FALSE;
break; /* exit from the select loop */
} else if (rcode < 0) {
/* select had an error */
if (errno == EINTR) { /* we were interrupted */
time(&now);
if (now > end) {
_pam_log(LOG_ERR, "RADIUS server %s failed to respond",
server->hostname);
if (--server_tries) goto send;
ok = FALSE;
break; /* exit from the select loop */
}
tv.tv_sec = end - now;
if (tv.tv_sec == 0) { /* keep waiting */
tv.tv_sec = 1;
}
} else { /* not an interrupt, it was a real error */
_pam_log(LOG_ERR, "Error waiting for response from RADIUS server %s: %s",
server->hostname, strerror(errno));
ok = FALSE;
break;
}
/* the select returned OK */
} else if (FD_ISSET(conf->sockfd, &set)) {
/* try to receive some data */
if ((total_length = recvfrom(conf->sockfd, (void *) response, BUFFER_SIZE,
0, &saremote, &salen)) < 0) {
_pam_log(LOG_ERR, "error reading RADIUS packet from server %s: %s",
server->hostname, strerror(errno));
ok = FALSE;
break;
/* there's data, see if it's valid */
} else {
char *p = server->secret;
if ((ntohs(response->length) != total_length) ||
(ntohs(response->length) > BUFFER_SIZE)) {
_pam_log(LOG_ERR, "RADIUS packet from server %s is corrupted",
server->hostname);
ok = FALSE;
break;
}
/* Check if we have the data OK. We should also check request->id */
if (password) {
if (old_password) {
#ifdef LIVINGSTON_PASSWORD_VERIFY_BUG_FIXED
p = old_password; /* what it should be */
#else
p = ""; /* what it really is */
#endif
}
/*
* RFC 2139 p.6 says not do do this, but the Livingston 1.16
* server disagrees. If the user says he wants the bug, give in.
*/
} else { /* authentication request */
if (conf->accounting_bug) {
p = "";
}
}
if (!verify_packet(p, response, request)) {
_pam_log(LOG_ERR, "packet from RADIUS server %s failed verification: "
"The shared secret is probably incorrect.", server->hostname);
ok = FALSE;
break;
}
/*
* Check that the response ID matches the request ID.
*/
if (response->id != request->id) {
_pam_log(LOG_WARNING, "Response packet ID %d does not match the "
"request packet ID %d: verification of packet fails",
response->id, request->id);
ok = FALSE;
break;
}
}
/*
* Whew! The select is done. It hasn't timed out, or errored out.
* It's our descriptor. We've got some data. It's the right size.
* The packet is valid.
* NOW, we can skip out of the select loop, and process the packet
*/
break;
}
/* otherwise, we've got data on another descriptor, keep select'ing */
}
/* go to the next server if this one didn't respond */
next:
if (!ok) {
radius_server_t *old; /* forget about this server */
old = server;
server = server->next;
conf->server = server;
_pam_forget(old->secret);
free(old->hostname);
free(old);
if (server) { /* if there's more servers to check */
/* get a new authentication vector, and update the passwords */
get_random_vector(request->vector);
request->id = request->vector[0];
/* update passwords, as appropriate */
if (password) {
get_random_vector(request->vector);
if (old_password) { /* password change request */
add_password(request, PW_PASSWORD, password, old_password);
add_password(request, PW_OLD_PASSWORD, old_password, old_password);
} else { /* authentication request */
add_password(request, PW_PASSWORD, password, server->secret);
}
}
}
continue;
} else {
/* we've found one that does respond, forget about the other servers */
cleanup(server->next);
server->next = NULL;
live_server = server; /* we've got a live one! */
break;
}
}
if (!server) {
_pam_log(LOG_ERR, "All RADIUS servers failed to respond.");
if (conf->localifdown)
retval = PAM_IGNORE;
else
retval = PAM_AUTHINFO_UNAVAIL;
} else {
retval = PAM_SUCCESS;
}
return retval;
} | 0 |
Android | 7558d03e6498e970b761aa44fff6b2c659202d95 | NOT_APPLICABLE | NOT_APPLICABLE | bool venc_dev::venc_set_error_resilience(OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE* error_resilience)
{
bool status = true;
struct venc_headerextension hec_cfg;
struct venc_multiclicecfg multislice_cfg;
int rc;
OMX_U32 resynchMarkerSpacingBytes = 0;
struct v4l2_control control;
memset(&control, 0, sizeof(control));
if (m_sVenc_cfg.codectype == V4L2_PIX_FMT_MPEG4) {
if (error_resilience->bEnableHEC) {
hec_cfg.header_extension = 1;
} else {
hec_cfg.header_extension = 0;
}
hec.header_extension = error_resilience->bEnableHEC;
}
if (error_resilience->bEnableRVLC) {
DEBUG_PRINT_ERROR("RVLC is not Supported");
return false;
}
if (( m_sVenc_cfg.codectype != V4L2_PIX_FMT_H263) &&
(error_resilience->bEnableDataPartitioning)) {
DEBUG_PRINT_ERROR("DataPartioning are not Supported for MPEG4/H264");
return false;
}
if (error_resilience->nResynchMarkerSpacing) {
resynchMarkerSpacingBytes = error_resilience->nResynchMarkerSpacing;
resynchMarkerSpacingBytes = ALIGN(resynchMarkerSpacingBytes, 8) >> 3;
}
if (( m_sVenc_cfg.codectype != V4L2_PIX_FMT_H263) &&
(error_resilience->nResynchMarkerSpacing)) {
multislice_cfg.mslice_mode = VEN_MSLICE_CNT_BYTE;
multislice_cfg.mslice_size = resynchMarkerSpacingBytes;
control.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE;
control.value = V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES;
} else if (m_sVenc_cfg.codectype == V4L2_PIX_FMT_H263 &&
error_resilience->bEnableDataPartitioning) {
multislice_cfg.mslice_mode = VEN_MSLICE_GOB;
multislice_cfg.mslice_size = resynchMarkerSpacingBytes;
control.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE;
control.value = V4L2_MPEG_VIDEO_MULTI_SLICE_GOB;
} else {
multislice_cfg.mslice_mode = VEN_MSLICE_OFF;
multislice_cfg.mslice_size = 0;
control.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE;
control.value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE;
}
DEBUG_PRINT_LOW("%s(): mode = %lu, size = %lu", __func__,
multislice_cfg.mslice_mode, multislice_cfg.mslice_size);
DEBUG_PRINT_ERROR("Calling IOCTL set control for id=%x, val=%d", control.id, control.value);
rc = ioctl(m_nDriver_fd, VIDIOC_S_CTRL, &control);
if (rc) {
DEBUG_PRINT_ERROR("Failed to set Slice mode control");
return false;
}
DEBUG_PRINT_ERROR("Success IOCTL set control for id=%x, value=%d", control.id, control.value);
multislice.mslice_mode=control.value;
control.id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES;
control.value = resynchMarkerSpacingBytes;
DEBUG_PRINT_ERROR("Calling IOCTL set control for id=%x, val=%d", control.id, control.value);
rc = ioctl(m_nDriver_fd, VIDIOC_S_CTRL, &control);
if (rc) {
DEBUG_PRINT_ERROR("Failed to set MAX MB control");
return false;
}
DEBUG_PRINT_ERROR("Success IOCTL set control for id=%x, value=%d", control.id, control.value);
multislice.mslice_mode = multislice_cfg.mslice_mode;
multislice.mslice_size = multislice_cfg.mslice_size;
return status;
}
| 0 |
linux | 38740a5b87d53ceb89eb2c970150f6e94e00373a | NOT_APPLICABLE | NOT_APPLICABLE | static int ffs_func_eps_enable(struct ffs_function *func)
{
struct ffs_data *ffs = func->ffs;
struct ffs_ep *ep = func->eps;
struct ffs_epfile *epfile = ffs->epfiles;
unsigned count = ffs->eps_count;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
do {
struct usb_endpoint_descriptor *ds;
int desc_idx;
if (ffs->gadget->speed == USB_SPEED_SUPER)
desc_idx = 2;
else if (ffs->gadget->speed == USB_SPEED_HIGH)
desc_idx = 1;
else
desc_idx = 0;
/* fall-back to lower speed if desc missing for current speed */
do {
ds = ep->descs[desc_idx];
} while (!ds && --desc_idx >= 0);
if (!ds) {
ret = -EINVAL;
break;
}
ep->ep->driver_data = ep;
ep->ep->desc = ds;
ret = usb_ep_enable(ep->ep);
if (likely(!ret)) {
epfile->ep = ep;
epfile->in = usb_endpoint_dir_in(ds);
epfile->isoc = usb_endpoint_xfer_isoc(ds);
} else {
break;
}
wake_up(&epfile->wait);
++ep;
++epfile;
} while (--count);
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
return ret;
}
| 0 |
qemu | 844864fbae66935951529408831c2f22367a57b6 | NOT_APPLICABLE | NOT_APPLICABLE | static QEMUSGList *megasas_get_sg_list(SCSIRequest *req)
{
MegasasCmd *cmd = req->hba_private;
if (cmd->frame->header.frame_cmd == MFI_CMD_DCMD) {
return NULL;
} else {
return &cmd->qsg;
}
}
| 0 |
gst-plugins-good | 02174790726dd20a5c73ce2002189bf240ad4fe0 | NOT_APPLICABLE | NOT_APPLICABLE | gst_matroska_demux_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
{
GstMatroskaDemux *demux = GST_MATROSKA_DEMUX (parent);
guint available;
GstFlowReturn ret = GST_FLOW_OK;
guint needed = 0;
guint32 id;
guint64 length;
if (G_UNLIKELY (GST_BUFFER_IS_DISCONT (buffer))) {
GST_DEBUG_OBJECT (demux, "got DISCONT");
gst_adapter_clear (demux->common.adapter);
GST_OBJECT_LOCK (demux);
gst_matroska_read_common_reset_streams (&demux->common,
GST_CLOCK_TIME_NONE, FALSE);
GST_OBJECT_UNLOCK (demux);
}
gst_adapter_push (demux->common.adapter, buffer);
buffer = NULL;
next:
available = gst_adapter_available (demux->common.adapter);
ret = gst_matroska_read_common_peek_id_length_push (&demux->common,
GST_ELEMENT_CAST (demux), &id, &length, &needed);
if (G_UNLIKELY (ret != GST_FLOW_OK && ret != GST_FLOW_EOS)) {
if (demux->common.ebml_segment_length != G_MAXUINT64
&& demux->common.offset >=
demux->common.ebml_segment_start + demux->common.ebml_segment_length) {
return GST_FLOW_OK;
} else {
gint64 bytes_scanned;
if (demux->common.start_resync_offset == -1) {
demux->common.start_resync_offset = demux->common.offset;
demux->common.state_to_restore = demux->common.state;
}
bytes_scanned = demux->common.offset - demux->common.start_resync_offset;
if (bytes_scanned <= INVALID_DATA_THRESHOLD) {
GST_WARNING_OBJECT (demux,
"parse error, looking for next cluster, actual offset %"
G_GUINT64_FORMAT ", start resync offset %" G_GUINT64_FORMAT,
demux->common.offset, demux->common.start_resync_offset);
demux->common.state = GST_MATROSKA_READ_STATE_SCANNING;
ret = GST_FLOW_OK;
} else {
GST_WARNING_OBJECT (demux,
"unrecoverable parse error, next cluster not found and threshold "
"exceeded, bytes scanned %" G_GINT64_FORMAT, bytes_scanned);
return ret;
}
}
}
GST_LOG_OBJECT (demux, "Offset %" G_GUINT64_FORMAT ", Element id 0x%x, "
"size %" G_GUINT64_FORMAT ", needed %d, available %d",
demux->common.offset, id, length, needed, available);
if (needed > available)
return GST_FLOW_OK;
ret = gst_matroska_demux_parse_id (demux, id, length, needed);
if (ret == GST_FLOW_EOS) {
/* need more data */
return GST_FLOW_OK;
} else if (ret != GST_FLOW_OK) {
return ret;
} else
goto next;
} | 0 |
wget | d892291fb8ace4c3b734ea5125770989c215df3f | NOT_APPLICABLE | NOT_APPLICABLE | set_content_type (int *dt, const char *type)
{
/* If content-type is not given, assume text/html. This is because
of the multitude of broken CGI's that "forget" to generate the
content-type. */
if (!type ||
0 == c_strcasecmp (type, TEXTHTML_S) ||
0 == c_strcasecmp (type, TEXTXHTML_S))
*dt |= TEXTHTML;
else
*dt &= ~TEXTHTML;
if (type &&
0 == c_strcasecmp (type, TEXTCSS_S))
*dt |= TEXTCSS;
else
*dt &= ~TEXTCSS;
} | 0 |
ghostpdl | bf72f1a3dd5392ee8291e3b1518a0c2c5dc6ba39 | NOT_APPLICABLE | NOT_APPLICABLE | bjc_put_hi_lo(gp_file *file, int value)
{
gp_fputc(((value & 0xffff) >> 8), file);
gp_fputc(value & 0xff, file);
} | 0 |
Chrome | befb46ae3385fa13975521e9a2281e35805b339e | NOT_APPLICABLE | NOT_APPLICABLE | void FrameLoader::loadURLIntoChildFrame(const KURL& url, const String& referer, Frame* childFrame)
{
ASSERT(childFrame);
HistoryItem* parentItem = history()->currentItem();
FrameLoadType loadType = this->loadType();
FrameLoadType childLoadType = FrameLoadTypeRedirectWithLockedBackForwardList;
KURL workingURL = url;
if (parentItem && parentItem->children().size() && isBackForwardLoadType(loadType)) {
HistoryItem* childItem = parentItem->childItemWithTarget(childFrame->tree()->name());
if (childItem) {
workingURL = KURL(ParsedURLString, childItem->originalURLString());
childLoadType = loadType;
childFrame->loader()->history()->setProvisionalItem(childItem);
}
}
RefPtr<Archive> subframeArchive = activeDocumentLoader()->popArchiveForSubframe(childFrame->tree()->name());
if (subframeArchive)
childFrame->loader()->loadArchive(subframeArchive.release());
else
childFrame->loader()->loadURL(workingURL, referer, String(), false, childLoadType, 0, 0);
}
| 0 |
Bento4 | 41cad602709436628f07b4c4f64e9ff7a611f687 | NOT_APPLICABLE | NOT_APPLICABLE | AP4_DataAtom::AP4_DataAtom(AP4_UI32 size, AP4_ByteStream& stream) :
AP4_Atom(AP4_ATOM_TYPE_DATA, size),
m_Source(NULL)
{
if (size < AP4_ATOM_HEADER_SIZE+8) return;
AP4_UI32 i;
stream.ReadUI32(i); m_DataType = (DataType)i;
stream.ReadUI32(i); m_DataLang = (DataLang)i;
// the stream for the data is a substream of this source
AP4_Position data_offset;
stream.Tell(data_offset);
AP4_Size data_size = size-AP4_ATOM_HEADER_SIZE-8;
m_Source = new AP4_SubStream(stream, data_offset, data_size);
} | 0 |
linux | 681fef8380eb818c0b845fca5d2ab1dcbab114ee | CVE-2016-4482 | CWE-200 | static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg)
{
struct usbdevfs_connectinfo ci = {
.devnum = ps->dev->devnum,
.slow = ps->dev->speed == USB_SPEED_LOW
};
if (copy_to_user(arg, &ci, sizeof(ci)))
return -EFAULT;
return 0;
}
| 1 |
radare2 | 10517e3ff0e609697eb8cde60ec8dc999ee5ea24 | NOT_APPLICABLE | NOT_APPLICABLE | R_API void r_core_anal_inflags(RCore *core, const char *glob) {
RList *addrs = r_list_newf (free);
RListIter *iter;
bool a2f = r_config_get_i (core->config, "anal.a2f");
char *anal_in = strdup (r_config_get (core->config, "anal.in"));
r_config_set (core->config, "anal.in", "block");
// aaFa = use a2f instead of af+
bool simple = (!glob || *glob != 'a');
glob = r_str_trim_head_ro (glob);
char *addr;
r_flag_foreach_glob (core->flags, glob, __cb, addrs);
// should be sorted already
r_list_sort (addrs, (RListComparator)__addrs_cmp);
r_list_foreach (addrs, iter, addr) {
if (!iter->n || r_cons_is_breaked ()) {
break;
}
char *addr2 = iter->n->data;
if (!addr || !addr2) {
break;
}
ut64 a0 = r_num_get (NULL, addr);
ut64 a1 = r_num_get (NULL, addr2);
if (a0 == a1) {
// ignore
continue;
}
if (a0 > a1) {
eprintf ("Warning: unsorted flag list 0x%"PFMT64x" 0x%"PFMT64x"\n", a0, a1);
continue;
}
st64 sz = a1 - a0;
if (sz < 1 || sz > core->anal->opt.bb_max_size) {
eprintf ("Warning: invalid flag range from 0x%08"PFMT64x" to 0x%08"PFMT64x"\n", a0, a1);
continue;
}
if (simple) {
RFlagItem *fi = r_flag_get_at (core->flags, a0, 0);
r_core_cmdf (core, "af+ %s fcn.%s", addr, fi? fi->name: addr);
r_core_cmdf (core, "afb+ %s %s %d", addr, addr, (int)sz);
} else {
r_core_cmdf (core, "aab@%s!%s-%s\n", addr, addr2, addr);
RAnalFunction *fcn = r_anal_get_fcn_in (core->anal, r_num_math (core->num, addr), 0);
if (fcn) {
eprintf ("%s %s %"PFMT64d" # %s\n", addr, "af", sz, fcn->name);
} else {
if (a2f) {
r_core_cmdf (core, "a2f@%s!%s-%s\n", addr, addr2, addr);
} else {
r_core_cmdf (core, "af@%s!%s-%s\n", addr, addr2, addr);
}
fcn = r_anal_get_fcn_in (core->anal, r_num_math (core->num, addr), 0);
eprintf ("%s %s %.4"PFMT64d" # %s\n", addr, "aab", sz, fcn?fcn->name: "");
}
}
}
r_list_free (addrs);
r_config_set (core->config, "anal.in", anal_in);
free (anal_in);
} | 0 |
vim | 409510c588b1eec1ae33511ae97a21eb8e110895 | NOT_APPLICABLE | NOT_APPLICABLE | ignorecase_opt(char_u *pat, int ic_in, int scs)
{
int ic = ic_in;
if (ic && !no_smartcase && scs
&& !(ctrl_x_mode_not_default() && curbuf->b_p_inf))
ic = !pat_has_uppercase(pat);
no_smartcase = FALSE;
return ic;
} | 0 |
server | af810407f78b7f792a9bb8c47c8c532eb3b3a758 | NOT_APPLICABLE | NOT_APPLICABLE | int ha_rollback_trans(THD *thd, bool all)
{
int error=0;
THD_TRANS *trans=all ? &thd->transaction.all : &thd->transaction.stmt;
Ha_trx_info *ha_info= trans->ha_list, *ha_info_next;
/*
"real" is a nick name for a transaction for which a commit will
make persistent changes. E.g. a 'stmt' transaction inside a 'all'
transaction is not 'real': even though it's possible to commit it,
the changes are not durable as they might be rolled back if the
enclosing 'all' transaction is rolled back.
We establish the value of 'is_real_trans' by checking
if it's an explicit COMMIT or BEGIN statement, or implicit
commit issued by DDL (in these cases all == TRUE),
or if we're running in autocommit mode (it's only in the autocommit mode
ha_commit_one_phase() is called with an empty
transaction.all.ha_list, see why in trans_register_ha()).
*/
bool is_real_trans=all || thd->transaction.all.ha_list == 0;
DBUG_ENTER("ha_rollback_trans");
/*
We must not rollback the normal transaction if a statement
transaction is pending.
*/
DBUG_ASSERT(thd->transaction.stmt.ha_list == NULL ||
trans == &thd->transaction.stmt);
#ifdef HAVE_REPLICATION
if (is_real_trans)
{
/*
In parallel replication, if we need to rollback during commit, we must
first inform following transactions that we are going to abort our commit
attempt. Otherwise those following transactions can run too early, and
possibly cause replication to fail. See comments in retry_event_group().
There were several bugs with this in the past that were very hard to
track down (MDEV-7458, MDEV-8302). So we add here an assertion for
rollback without signalling following transactions. And in release
builds, we explicitly do the signalling before rolling back.
*/
DBUG_ASSERT(!(thd->rgi_slave && thd->rgi_slave->did_mark_start_commit));
if (thd->rgi_slave && thd->rgi_slave->did_mark_start_commit)
thd->rgi_slave->unmark_start_commit();
}
#endif
if (thd->in_sub_stmt)
{
DBUG_ASSERT(0);
/*
If we are inside stored function or trigger we should not commit or
rollback current statement transaction. See comment in ha_commit_trans()
call for more information.
*/
if (!all)
DBUG_RETURN(0);
my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0));
DBUG_RETURN(1);
}
#ifdef WITH_WSREP
(void) wsrep_before_rollback(thd, all);
#endif /* WITH_WSREP */
if (ha_info)
{
/* Close all cursors that can not survive ROLLBACK */
if (is_real_trans) /* not a statement commit */
thd->stmt_map.close_transient_cursors();
for (; ha_info; ha_info= ha_info_next)
{
int err;
handlerton *ht= ha_info->ht();
if ((err= ht->rollback(ht, thd, all)))
{ // cannot happen
my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err);
error=1;
#ifdef WITH_WSREP
WSREP_WARN("handlerton rollback failed, thd %lld %lld conf %d SQL %s",
thd->thread_id, thd->query_id, thd->wsrep_trx().state(),
thd->query());
#endif /* WITH_WSREP */
}
status_var_increment(thd->status_var.ha_rollback_count);
ha_info_next= ha_info->next();
ha_info->reset(); /* keep it conveniently zero-filled */
}
trans->ha_list= 0;
trans->no_2pc=0;
}
#ifdef WITH_WSREP
if (thd->is_error())
{
WSREP_DEBUG("ha_rollback_trans(%lld, %s) rolled back: %s: %s; is_real %d",
thd->thread_id, all?"TRUE":"FALSE", wsrep_thd_query(thd),
thd->get_stmt_da()->message(), is_real_trans);
}
(void) wsrep_after_rollback(thd, all);
#endif /* WITH_WSREP */
/* Always cleanup. Even if nht==0. There may be savepoints. */
if (is_real_trans)
{
/*
Thanks to possibility of MDL deadlock rollback request can come even if
transaction hasn't been started in any transactional storage engine.
*/
if (thd->transaction_rollback_request &&
thd->transaction.xid_state.is_explicit_XA())
thd->transaction.xid_state.set_error(thd->get_stmt_da()->sql_errno());
thd->has_waiter= false;
thd->transaction.cleanup();
}
if (all)
thd->transaction_rollback_request= FALSE;
/*
If a non-transactional table was updated, warn; don't warn if this is a
slave thread (because when a slave thread executes a ROLLBACK, it has
been read from the binary log, so it's 100% sure and normal to produce
error ER_WARNING_NOT_COMPLETE_ROLLBACK. If we sent the warning to the
slave SQL thread, it would not stop the thread but just be printed in
the error log; but we don't want users to wonder why they have this
message in the error log, so we don't send it.
We don't have to test for thd->killed == KILL_SYSTEM_THREAD as
it doesn't matter if a warning is pushed to a system thread or not:
No one will see it...
*/
if (is_real_trans && thd->transaction.all.modified_non_trans_table &&
!thd->slave_thread && thd->killed < KILL_CONNECTION)
push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARNING_NOT_COMPLETE_ROLLBACK,
ER_THD(thd, ER_WARNING_NOT_COMPLETE_ROLLBACK));
#ifdef HAVE_REPLICATION
repl_semisync_master.wait_after_rollback(thd, all);
#endif
DBUG_RETURN(error);
} | 0 |
net | e572ff80f05c33cd0cb4860f864f5c9c044280b6 | CVE-2021-45402 | CWE-668 | static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
{
reg->umin_value = reg->u32_min_value;
reg->umax_value = reg->u32_max_value;
/* Attempt to pull 32-bit signed bounds into 64-bit bounds
* but must be positive otherwise set to worse case bounds
* and refine later from tnum.
*/
if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
reg->smax_value = reg->s32_max_value;
else
reg->smax_value = U32_MAX;
if (reg->s32_min_value >= 0)
reg->smin_value = reg->s32_min_value;
else
reg->smin_value = 0;
} | 1 |
FFmpeg | e1182fac1afba92a4975917823a5f644bee7e6e8 | NOT_APPLICABLE | NOT_APPLICABLE | void ff_mpeg4_init_partitions(MpegEncContext *s)
{
uint8_t *start = put_bits_ptr(&s->pb);
uint8_t *end = s->pb.buf_end;
int size = end - start;
int pb_size = (((intptr_t)start + size / 3) & (~3)) - (intptr_t)start;
int tex_size = (size - 2 * pb_size) & (~3);
set_put_bits_buffer_size(&s->pb, pb_size);
init_put_bits(&s->tex_pb, start + pb_size, tex_size);
init_put_bits(&s->pb2, start + pb_size + tex_size, pb_size);
}
| 0 |
mysql-server | 3d8134d2c9b74bc8883ffe2ef59c168361223837 | NOT_APPLICABLE | NOT_APPLICABLE | mysql_get_proto_info(MYSQL *mysql)
{
return (mysql->protocol_version);
} | 0 |
Chrome | e5787005a9004d7be289cc649c6ae4f3051996cd | NOT_APPLICABLE | NOT_APPLICABLE | void RenderWidgetHostImpl::SendCursorVisibilityState(bool is_visible) {
Send(new InputMsg_CursorVisibilityChange(GetRoutingID(), is_visible));
}
| 0 |