idx int64 | target int64 | func string |
|---|---|---|
0 | 0 | void * gdImageWBMPPtr (gdImagePtr im, int *size, int fg)
{
	/* Serialize `im` as WBMP (foreground color index `fg`) into a growable
	 * in-memory I/O context, then hand the accumulated bytes back to the
	 * caller.  On success *size receives the byte count and the returned
	 * buffer is owned by the caller; on encoding failure NULL is returned. */
	gdIOCtx *ctx = gdNewDynamicCtx(2048, NULL);
	void *data = NULL;

	/* _gdImageWBMPCtx() returns 0 on success. */
	if (_gdImageWBMPCtx(im, fg, ctx) == 0)
		data = gdDPExtractData(ctx, size);

	ctx->gd_free(ctx);
	return data;
} |
1 | 0 | mbfl_convert_encoding(
mbfl_string *string,
mbfl_string *result,
enum mbfl_no_encoding toenc)
{
/* Convert `string` to the encoding `toenc`, writing the converted bytes
 * into `result`.  Returns `result` (via mbfl_memory_device_result) on
 * success, or NULL on bad arguments or filter-allocation failure. */
int n;
unsigned char *p;
const mbfl_encoding *encoding;
mbfl_memory_device device;
mbfl_convert_filter *filter1;
mbfl_convert_filter *filter2;
/* initialize */
encoding = mbfl_no2encoding(toenc);
if (encoding == NULL || string == NULL || result == NULL) {
return NULL;
}
filter1 = NULL;
filter2 = NULL;
/* Prefer a single direct filter (source -> target).  If no direct
 * conversion exists, build a two-stage pipeline through the wchar
 * intermediate form: filter1 (source -> wchar) feeds filter2
 * (wchar -> target), and filter2 outputs into `device`. */
if (mbfl_convert_filter_get_vtbl(string->no_encoding, toenc) != NULL) {
filter1 = mbfl_convert_filter_new(string->no_encoding, toenc, mbfl_memory_device_output, 0, &device);
} else {
filter2 = mbfl_convert_filter_new(mbfl_no_encoding_wchar, toenc, mbfl_memory_device_output, 0, &device);
if (filter2 != NULL) {
filter1 = mbfl_convert_filter_new(string->no_encoding, mbfl_no_encoding_wchar, (int (*)(int, void*))filter2->filter_function, NULL, filter2);
if (filter1 == NULL) {
/* First stage could not be built; tear the second stage down. */
mbfl_convert_filter_delete(filter2);
}
}
}
if (filter1 == NULL) {
return NULL;
}
if (filter2 != NULL) {
/* Replace characters the target encoding cannot represent with '?'
 * instead of aborting the conversion. */
filter2->illegal_mode = MBFL_OUTPUTFILTER_ILLEGAL_MODE_CHAR;
filter2->illegal_substchar = 0x3f; /* '?' */
}
/* NOTE(review): `device` is initialized only here, after its address was
 * already handed to the filters above.  This is safe only if the filters
 * never touch the device before data is fed — TODO confirm. */
mbfl_memory_device_init(&device, string->len, (string->len >> 2) + 8);
/* feed data */
n = string->len;
p = string->val;
if (p != NULL) {
while (n > 0) {
/* A negative filter return aborts the feed early. */
if ((*filter1->filter_function)(*p++, filter1) < 0) {
break;
}
n--;
}
}
/* Flush any state held in the filter chain, then release the filters. */
mbfl_convert_filter_flush(filter1);
mbfl_convert_filter_delete(filter1);
if (filter2 != NULL) {
mbfl_convert_filter_flush(filter2);
mbfl_convert_filter_delete(filter2);
}
return mbfl_memory_device_result(&device, result);
} |
2 | 0 | client_connected_to_dbus (GObject *source_object,
                          GAsyncResult *res,
                          gpointer user_data)
{
  /* Completion callback for the async connection to the real D-Bus:
   * on success, store the bus-side connection on the proxy client and
   * start pumping both directions; on failure, log and drop the client. */
  FlatpakProxyClient *client = user_data;
  GSocketConnection *connection;
  GError *error = NULL;
  GIOStream *stream;

  stream = g_dbus_address_get_stream_finish (res, NULL, &error);
  if (stream == NULL)
    {
      /* Fix: `error` was used for the message but never freed (leak);
       * also guard against a NULL error. */
      g_warning ("Failed to connect to bus: %s",
                 error ? error->message : "(unknown error)");
      g_clear_error (&error);
      g_object_unref (client);
      return;
    }

  connection = G_SOCKET_CONNECTION (stream);
  /* Non-blocking socket: all further I/O is driven by the main loop. */
  g_socket_set_blocking (g_socket_connection_get_socket (connection), FALSE);
  client->bus_side.connection = connection;

  start_reading (&client->client_side);
  start_reading (&client->bus_side);
} |
3 | 0 | void WebContents::OnCursorChanged(const content::WebCursor& webcursor) {
const ui::Cursor& cursor = webcursor.cursor();
// Forward cursor changes to JS as a "cursor-changed" event.  Custom
// cursors additionally carry the bitmap image, its scale factor, size and
// hotspot; built-in cursor types only carry their string name.
if (cursor.type() == ui::mojom::CursorType::kCustom) {
Emit("cursor-changed", CursorTypeToString(cursor),
gfx::Image::CreateFrom1xBitmap(cursor.custom_bitmap()),
cursor.image_scale_factor(),
gfx::Size(cursor.custom_bitmap().width(),
cursor.custom_bitmap().height()),
cursor.custom_hotspot());
} else {
Emit("cursor-changed", CursorTypeToString(cursor));
}
} |
4 | 0 | apr_status_t h2_session_rcreate(h2_session **psession,
request_rec *r, h2_ctx *ctx, h2_workers *workers)
{
/* Create an HTTP/2 session for a request (as opposed to a connection):
 * thin wrapper forwarding to h2_session_create_int() with the request's
 * connection and the request itself. */
return h2_session_create_int(psession, r->connection, r, ctx, workers);
} |
5 | 0 | reference_mode_end(int result)
{
  /* Called when the active reference mode finishes; `result` is non-zero
   * on success.  One-shot modes terminate the program, initstepslew falls
   * back to normal NTP operation after a short delay. */
  switch (ref_mode) {
    case REF_ModeInitStepSlew:
      /* Switch to the normal mode, the delay is used to prevent polling
         interval shorter than the burst interval if some configured servers
         were used also for initstepslew */
      SCH_AddTimeoutByDelay(2.0, post_init_ntp_hook, NULL);
      break;
    case REF_ModeNormal:
    case REF_ModeUpdateOnce:
    case REF_ModePrintOnce:
      /* Process exit code: 0 on success, 1 on failure. */
      exit_status = result ? 0 : 1;
      SCH_QuitProgram();
      break;
    default:
      assert(0);
  }
} |
6 | 0 | static ssize_t event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
/* sysfs "event" attribute: print the UIO device's interrupt event counter
 * as an unsigned decimal followed by a newline. */
struct uio_device *idev = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
} |
7 | 0 | ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
struct ext4_ext_path *path)
{
/* Walk the extent tree from the inode's root down to the leaf covering
 * logical block `block`, filling path[0..depth] with the header, buffer
 * head and index/extent hit at each level.  If `path` is NULL a fresh
 * array (depth + 2 entries, to allow for a later depth increase) is
 * allocated.  Returns the path array or an ERR_PTR on failure; on error
 * the path's references are dropped and, if allocated here, freed. */
struct ext4_extent_header *eh;
struct buffer_head *bh;
short int depth, i, ppos = 0, alloc = 0;
eh = ext_inode_hdr(inode);
depth = ext_depth(inode);
/* account possible depth increase */
if (!path) {
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
GFP_NOFS);
if (!path)
return ERR_PTR(-ENOMEM);
alloc = 1;
}
path[0].p_hdr = eh;
path[0].p_bh = NULL;
i = depth;
/* walk through the tree */
while (i) {
int need_to_validate = 0;
ext_debug("depth %d: num %d, max %d\n",
ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
/* Binary-search the index block for `block`, then descend. */
ext4_ext_binsearch_idx(inode, path + ppos, block);
path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
path[ppos].p_depth = i;
path[ppos].p_ext = NULL;
bh = sb_getblk(inode->i_sb, path[ppos].p_block);
if (unlikely(!bh))
goto err;
if (!bh_uptodate_or_lock(bh)) {
trace_ext4_ext_load_extent(inode, block,
path[ppos].p_block);
if (bh_submit_read(bh) < 0) {
put_bh(bh);
goto err;
}
/* validate the extent entries */
need_to_validate = 1;
}
eh = ext_block_hdr(bh);
ppos++;
/* Corrupt on-disk tree: deeper than the root header claims. */
if (unlikely(ppos > depth)) {
put_bh(bh);
EXT4_ERROR_INODE(inode,
"ppos %d > depth %d", ppos, depth);
goto err;
}
path[ppos].p_bh = bh;
path[ppos].p_hdr = eh;
i--;
/* Only freshly-read blocks need their entries checked. */
if (need_to_validate && ext4_ext_check(inode, eh, i))
goto err;
}
path[ppos].p_depth = i;
path[ppos].p_ext = NULL;
path[ppos].p_idx = NULL;
/* find extent */
ext4_ext_binsearch(inode, path + ppos, block);
/* if not an empty leaf */
if (path[ppos].p_ext)
path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
ext4_ext_show_path(inode, path);
return path;
err:
ext4_ext_drop_refs(path);
if (alloc)
kfree(path);
return ERR_PTR(-EIO);
} |
8 | 0 | static void test_bug8378()
{
#if defined(HAVE_CHARSET_gbk) && !defined(EMBEDDED_LIBRARY)
/* Regression test for bug #8378: mysql_real_escape_string() on a gbk
 * connection must leave the multi-byte test string untouched (compare
 * against TEST_BUG8378_OUT), and the resulting literal must be accepted
 * by the server.  Opens its own connection with the gbk charset. */
MYSQL *lmysql;
char out[9]; /* strlen(TEST_BUG8378)*2+1 */
char buf[256];
int len, rc;
myheader("test_bug8378");
if (!opt_silent)
fprintf(stdout, "\n Establishing a test connection ...");
if (!(lmysql= mysql_client_init(NULL)))
{
myerror("mysql_client_init() failed");
exit(1);
}
/* Force the connection character set to gbk before connecting. */
if (mysql_options(lmysql, MYSQL_SET_CHARSET_NAME, "gbk"))
{
myerror("mysql_options() failed");
exit(1);
}
if (!(mysql_real_connect(lmysql, opt_host, opt_user,
opt_password, current_db, opt_port,
opt_unix_socket, 0)))
{
myerror("connection failed");
exit(1);
}
if (!opt_silent)
fprintf(stdout, "OK");
rc= mysql_query(lmysql, "SET SQL_MODE=''");
myquery(rc);
len= mysql_real_escape_string(lmysql, out, TEST_BUG8378_IN, 4);
/* No escaping should have actually happened. */
DIE_UNLESS(memcmp(out, TEST_BUG8378_OUT, len) == 0);
/* The unescaped bytes must still form a valid SELECT literal. */
sprintf(buf, "SELECT '%s'", out);
rc=mysql_real_query(lmysql, buf, strlen(buf));
myquery(rc);
mysql_close(lmysql);
#endif
} |
9 | 1 | struct page *cma_alloc(struct cma *cma, unsigned int count, unsigned int align)
{
	/*
	 * Allocate `count` contiguous pages from the CMA area `cma` with the
	 * requested alignment (2^align pages).  Returns the first page on
	 * success or NULL on failure.  The area bitmap is searched under
	 * cma->lock; the candidate range is reserved in the bitmap before the
	 * lock is dropped, so concurrent allocators cannot race for it.
	 */
	unsigned long mask, offset, pfn, start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	/*
	 * Fix: reject requests larger than the whole CMA area up front.
	 * Previously a too-large `count` reached
	 * bitmap_find_next_zero_area_off() with bitmap_count > bitmap_maxno,
	 * which can misbehave instead of failing cleanly.
	 */
	if (bitmap_count > bitmap_maxno)
		return NULL;

	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			/* No free run left anywhere in the area. */
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		/* Migration failed: release our bitmap reservation. */
		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(page ? pfn : -1UL, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
} |
10 | 0 | static void *DestroyLogElement(void *log_info)
{
  /* Splay-tree destructor for a LogInfo entry: close the XML log file
   * (emitting the closing </log> tag first) and release all owned
   * strings before freeing the structure itself.  Always returns NULL. */
  LogInfo
    *log;

  log=(LogInfo *) log_info;
  if (log->file != (FILE *) NULL)
    {
      (void) FormatLocaleFile(log->file,"</log>\n");
      (void) fclose(log->file);
      log->file=(FILE *) NULL;
    }
  if (log->format != (char *) NULL)
    log->format=DestroyString(log->format);
  if (log->path != (char *) NULL)
    log->path=DestroyString(log->path);
  if (log->filename != (char *) NULL)
    log->filename=DestroyString(log->filename);
  log=(LogInfo *) RelinquishMagickMemory(log);
  return((void *) NULL);
} |
11 | 0 | gimp_channel_shrink (GimpChannel *channel,
gint radius_x,
gint radius_y,
gboolean edge_lock,
gboolean push_undo)
{
/* Shrink the channel's selection by the given radii, delegating to the
 * class vtable's shrink() implementation.  An undo step is only pushed
 * when the channel is attached to an image. */
g_return_if_fail (GIMP_IS_CHANNEL (channel));
if (! gimp_item_is_attached (GIMP_ITEM (channel)))
push_undo = FALSE;
GIMP_CHANNEL_GET_CLASS (channel)->shrink (channel, radius_x, radius_y,
edge_lock, push_undo);
} |
12 | 0 | int main(int argc, char **argv)
{
	/*
	 * JasPer "imginfo" entry point: decode an image from a file or stdin
	 * and print "<format> <numcmpts> <width> <height> <depth> <rawsize>".
	 * Exits non-zero on any failure.
	 */
	int fmtid;
	int id;
	char *infile;
	jas_stream_t *instream;
	jas_image_t *image;
	int width;
	int height;
	int depth;
	int numcmpts;
	int verbose;
	char *fmtname;
	int debug;
	size_t max_mem;
	size_t max_samples;
	char optstr[32];

	if (jas_init()) {
		abort();
	}
	cmdname = argv[0];

	/* Defaults; max_samples bounds decoder memory use. */
	max_samples = 64 * JAS_MEBI;
	infile = 0;
	verbose = 0;
	debug = 0;
#if defined(JAS_DEFAULT_MAX_MEM_USAGE)
	max_mem = JAS_DEFAULT_MAX_MEM_USAGE;
#endif

	/* Parse the command line options. */
	while ((id = jas_getopt(argc, argv, opts)) >= 0) {
		switch (id) {
		case OPT_VERBOSE:
			verbose = 1;
			break;
		case OPT_VERSION:
			printf("%s\n", JAS_VERSION);
			exit(EXIT_SUCCESS);
			break;
		case OPT_DEBUG:
			debug = atoi(jas_optarg);
			break;
		case OPT_INFILE:
			infile = jas_optarg;
			break;
		case OPT_MAXSAMPLES:
			max_samples = strtoull(jas_optarg, 0, 10);
			break;
		case OPT_MAXMEM:
			max_mem = strtoull(jas_optarg, 0, 10);
			break;
		case OPT_HELP:
		default:
			usage();
			break;
		}
	}
	jas_setdbglevel(debug);
#if defined(JAS_DEFAULT_MAX_MEM_USAGE)
	jas_set_max_mem_usage(max_mem);
#endif

	/* Open the image file (named file, or standard input). */
	if (infile) {
		/* The image is to be read from a file. */
		if (!(instream = jas_stream_fopen(infile, "rb"))) {
			fprintf(stderr, "cannot open input image file %s\n", infile);
			exit(EXIT_FAILURE);
		}
	} else {
		/* The image is to be read from standard input. */
		if (!(instream = jas_stream_fdopen(0, "rb"))) {
			fprintf(stderr, "cannot open standard input\n");
			exit(EXIT_FAILURE);
		}
	}

	if ((fmtid = jas_image_getfmt(instream)) < 0) {
		/*
		 * Fix: previously this only warned and fell through with a
		 * negative format id, leaving the failure to surface later in
		 * jas_image_decode().  Fail fast and close the open stream.
		 */
		fprintf(stderr, "unknown image format\n");
		jas_stream_close(instream);
		return EXIT_FAILURE;
	}
	snprintf(optstr, sizeof(optstr), "max_samples=%-zu", max_samples);

	/* Decode the image. */
	if (!(image = jas_image_decode(instream, fmtid, optstr))) {
		jas_stream_close(instream);
		fprintf(stderr, "cannot load image\n");
		return EXIT_FAILURE;
	}

	/* Close the image file. */
	jas_stream_close(instream);

	if (!(fmtname = jas_image_fmttostr(fmtid))) {
		jas_eprintf("format name lookup failed\n");
		return EXIT_FAILURE;
	}
	if (!(numcmpts = jas_image_numcmpts(image))) {
		fprintf(stderr, "warning: image has no components\n");
	}
	/* Report geometry of component 0 (all zeros when there are none). */
	if (numcmpts) {
		width = jas_image_cmptwidth(image, 0);
		height = jas_image_cmptheight(image, 0);
		depth = jas_image_cmptprec(image, 0);
	} else {
		width = 0;
		height = 0;
		depth = 0;
	}
	printf("%s %d %d %d %d %ld\n", fmtname, numcmpts, width, height, depth,
	  JAS_CAST(long, jas_image_rawsize(image)));

	jas_image_destroy(image);
	jas_image_clearfmts();
	return EXIT_SUCCESS;
} |
13 | 0 | struct yang_data *yang_data_new_uint16(const char *xpath, uint16_t value)
{
	char buf[BUFSIZ];

	/* Render the 16-bit value as decimal text and wrap it, together with
	 * its xpath, in a freshly allocated yang_data node. */
	snprintf(buf, sizeof(buf), "%u", (unsigned int)value);
	return yang_data_new(xpath, buf);
} |
14 | 0 | Item_timestamp_literal(THD *thd)
// Construct a literal TIMESTAMP item; no state beyond the Item_literal base.
:Item_literal(thd)
{ } |
15 | 1 | static int __vcpu_run(struct kvm_vcpu *vcpu)
{
/* Main vcpu run loop: alternate between entering the guest and blocking
 * while the vcpu is halted.  Returns <= 0 to drop back to userspace
 * (signal pending, userspace interrupt-injection request, or an error
 * from vcpu_enter_guest()).  The kvm->srcu read lock is held across the
 * loop and dropped around every sleep/reschedule point.
 * NOTE(review): this revision pins the vapic page via vapic_enter()/
 * vapic_exit(); later upstream kernels reworked this path — confirm
 * against current mainline before reuse. */
int r;
struct kvm *kvm = vcpu->kvm;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
r = vapic_enter(vcpu);
if (r) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
return r;
}
r = 1;
while (r > 0) {
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
r = vcpu_enter_guest(vcpu);
else {
/* Not runnable: drop srcu and sleep until something wakes us. */
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
kvm_apic_accept_events(vcpu);
switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
/* fall through: a freshly unhalted vcpu is runnable */
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
break;
case KVM_MP_STATE_INIT_RECEIVED:
/* Stay blocked until SIPI arrives. */
break;
default:
r = -EINTR;
break;
}
}
}
if (r <= 0)
break;
clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
/* Userspace asked to take over interrupt injection: exit to it. */
if (dm_request_for_irq_injection(vcpu)) {
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.request_irq_exits;
}
kvm_check_async_pf_completion(vcpu);
if (signal_pending(current)) {
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.signal_exits;
}
if (need_resched()) {
/* Drop srcu around the voluntary reschedule. */
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_resched(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
}
}
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
vapic_exit(vcpu);
return r;
} |
16 | 0 | struct nls_table *load_nls(char *charset)
{
/* Look up an NLS table by charset name, auto-loading the "nls_<charset>"
 * kernel module if the table is not already registered. */
return try_then_request_module(find_nls(charset), "nls_%s", charset);
} |
17 | 0 | void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs)
{
/* Pre-compute the record header and the total on-the-wire size of a
 * PERF_RECORD_SAMPLE before it is written to the ring buffer.  Each
 * enabled sample_type bit contributes its (possibly variable) size to
 * header->size and fills in the matching field of `data`. */
u64 sample_type = event->attr.sample_type;
header->type = PERF_RECORD_SAMPLE;
header->size = sizeof(*header) + event->header_size;
header->misc = 0;
header->misc |= perf_misc_flags(regs);
__perf_event_header__init_id(header, data, event);
if (sample_type & PERF_SAMPLE_IP)
data->ip = perf_instruction_pointer(regs);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
/* One u64 for nr, plus one per callchain entry. */
int size = 1;
data->callchain = perf_callchain(event, regs);
if (data->callchain)
size += data->callchain->nr;
header->size += size * sizeof(u64);
}
if (sample_type & PERF_SAMPLE_RAW) {
/* Raw size field plus payload; padded to keep u64 alignment. */
int size = sizeof(u32);
if (data->raw)
size += data->raw->size;
else
size += sizeof(u32);
WARN_ON_ONCE(size & (sizeof(u64)-1));
header->size += size;
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
int size = sizeof(u64); /* nr */
if (data->br_stack) {
size += data->br_stack->nr
* sizeof(struct perf_branch_entry);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
int size = sizeof(u64);
perf_sample_regs_user(&data->regs_user, regs);
if (data->regs_user.regs) {
u64 mask = event->attr.sample_regs_user;
/* One u64 per register selected in the sampling mask. */
size += hweight64(mask) * sizeof(u64);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_STACK_USER) {
/*
 * Either we need PERF_SAMPLE_STACK_USER bit to be always
 * processed as the last one or have additional check added
 * in case new sample type is added, because we could eat
 * up the rest of the sample size.
 */
struct perf_regs_user *uregs = &data->regs_user;
u16 stack_size = event->attr.sample_stack_user;
u16 size = sizeof(u64);
if (!uregs->abi)
perf_sample_regs_user(uregs, regs);
/* Clamp the requested dump to what fits in the record. */
stack_size = perf_sample_ustack_size(stack_size, header->size,
uregs->regs);
/*
 * If there is something to dump, add space for the dump
 * itself and for the field that tells the dynamic size,
 * which is how many have been actually dumped.
 */
if (stack_size)
size += sizeof(u64) + stack_size;
data->stack_user_size = stack_size;
header->size += size;
}
} |
18 | 0 | run_client_script_err_estimate(
    gpointer data,
    gpointer user_data)
{
    /* g_slist_foreach callback: forward one line of a client script's
     * stderr to the estimate result stream as a WARNING for its disk. */
    char            *msg = data;
    script_output_t *out = user_data;
    char            *qdisk;

    if (msg == NULL || out->stream == NULL)
	return;

    qdisk = quote_string(out->dle->disk);
    g_fprintf(out->stream, "%s 0 WARNING \"%s\"\n", qdisk, msg);
    amfree(qdisk);
} |
19 | 0 | static int vvalue_tvb_vector_internal(tvbuff_t *tvb, int offset, struct vt_vector *val, struct vtype_data *type, guint num)
{
/* Fetch a vector of `num` elements of the given variant type from `tvb`
 * starting at `offset` into `val`.  Fixed-size types use type->size;
 * variable-size types (size == -1) are stored as data_blob descriptors
 * and are padded to 4-byte boundaries on the wire.  Returns the number
 * of tvb bytes consumed. */
const int offset_in = offset;
const gboolean varsize = (type->size == -1);
const guint elsize = varsize ? (guint)sizeof(struct data_blob) : (guint)type->size;
guint8 *data;
int len;
guint i;
/*
 * Make sure we actually *have* the data we're going to fetch
 * here, before making a possibly-doomed attempt to allocate
 * memory for it.
 *
 * First, check for sane values.
 */
if (num > MAX_VT_VECTOR_SIZE) {
THROW(ReportedBoundsError);
}
/*
 * No huge numbers from the wire; now make sure we at least have that data.
 */
tvb_ensure_bytes_exist(tvb, offset, elsize * num);
/*
 * OK, it exists; allocate a buffer into which to fetch it.
 */
data = (guint8*)wmem_alloc(wmem_packet_scope(), elsize * num);
val->len = num;
val->u.vt_ui1 = data;
DISSECTOR_ASSERT((void*)&val->u == ((void*)&val->u.vt_ui1));
for (i=0; i<num; i++) {
DISSECTOR_ASSERT_HINT(type->tvb_get != 0,
"type that we don't know yet how to handle, please submit a bug with trace");
/* Per-type fetcher returns the number of wire bytes it consumed. */
len = type->tvb_get(tvb, offset, data);
data += elsize;
offset += len;
if (varsize && (offset % 4) ) { /* at begin or end of loop ??? */
int padding = 4 - (offset % 4);
offset += padding;
}
}
return offset - offset_in;
} |
20 | 0 | uint32_t mg_crc32(uint32_t crc, const char *buf, size_t len) {
  /* Bitwise CRC-32 (IEEE 802.3, reflected polynomial 0xEDB88320).
   * `crc` is the running value from a previous call (0 to start);
   * callers can checksum a stream chunk by chunk. */
  size_t k;
  int bit;

  crc = ~crc;
  for (k = 0; k < len; k++) {
    crc ^= (unsigned char) buf[k];
    for (bit = 0; bit < 8; bit++) {
      if (crc & 1) {
        crc = (crc >> 1) ^ 0xedb88320;
      } else {
        crc >>= 1;
      }
    }
  }
  return ~crc;
} |
End of preview. Expand in Data Studio.
No dataset card yet
- Downloads last month
- 7