| idx (int64) | target (int64) | func (string) |
|---|---|---|
| 0 | 0 |
void luac_build_info_free(LuacBinInfo *bin_info) {
if (!bin_info) {
return;
}
rz_list_free(bin_info->entry_list);
rz_list_free(bin_info->symbol_list);
rz_list_free(bin_info->section_list);
rz_list_free(bin_info->string_list);
free(bin_info);
}
| 1 | 1 |
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
switch (input1->type) {
case kTfLiteInt32: {
return EvalImpl<int32_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteInt64: {
return EvalImpl<int64_t>(context, data->requires_broadcast, input1,
input2, output);
}
case kTfLiteFloat32: {
return EvalImpl<float>(context, data->requires_broadcast, input1, input2,
output);
}
default: {
context->ReportError(context, "Type '%s' is not supported by floor_mod.",
TfLiteTypeGetName(input1->type));
return kTfLiteError;
}
}
}
| 2 | 1 |
list_session(char *log_dir, regex_t *re, const char *user, const char *tty)
{
char idbuf[7], *idstr, *cp;
struct eventlog *evlog = NULL;
const char *timestr;
int ret = -1;
debug_decl(list_session, SUDO_DEBUG_UTIL);
if ((evlog = iolog_parse_loginfo(-1, log_dir)) == NULL)
goto done;
if (evlog->command == NULL || evlog->submituser == NULL ||
evlog->runuser == NULL) {
goto done;
}
/* Match on search expression if there is one. */
if (!STAILQ_EMPTY(&search_expr) && !match_expr(&search_expr, evlog, true))
goto done;
/* Convert from /var/log/sudo-sessions/00/00/01 to 000001 */
cp = log_dir + strlen(session_dir) + 1;
if (IS_IDLOG(cp)) {
idbuf[0] = cp[0];
idbuf[1] = cp[1];
idbuf[2] = cp[3];
idbuf[3] = cp[4];
idbuf[4] = cp[6];
idbuf[5] = cp[7];
idbuf[6] = '\0';
idstr = idbuf;
} else {
/* Not an id, use as-is. */
idstr = cp;
}
/* XXX - print lines + cols? */
timestr = get_timestr(evlog->submit_time.tv_sec, 1);
printf("%s : %s : ", timestr ? timestr : "invalid date", evlog->submituser);
if (evlog->submithost != NULL)
printf("HOST=%s ; ", evlog->submithost);
if (evlog->ttyname != NULL)
printf("TTY=%s ; ", evlog->ttyname);
if (evlog->runchroot != NULL)
printf("CHROOT=%s ; ", evlog->runchroot);
if (evlog->runcwd != NULL || evlog->cwd != NULL)
printf("CWD=%s ; ", evlog->runcwd ? evlog->runcwd : evlog->cwd);
printf("USER=%s ; ", evlog->runuser);
if (evlog->rungroup != NULL)
printf("GROUP=%s ; ", evlog->rungroup);
printf("TSID=%s ; COMMAND=%s\n", idstr, evlog->command);
ret = 0;
done:
eventlog_free(evlog);
debug_return_int(ret);
}
| 3 | 0 |
int __nla_validate(const struct nlattr *head, int len, int maxtype,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack)
{
return __nla_validate_parse(head, len, maxtype, policy, validate,
extack, NULL, 0);
}
| 4 | 0 |
static int __netdev_printk(const char *level, const struct net_device *dev,
struct va_format *vaf)
{
int r;
if (dev && dev->dev.parent)
r = dev_printk(level, dev->dev.parent, "%s: %pV",
netdev_name(dev), vaf);
else if (dev)
r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
else
r = printk("%s(NULL net_device): %pV", level, vaf);
return r;
}
| 5 | 0 |
static unsigned int get_exif_ui16(struct iw_exif_state *e, unsigned int pos)
{
if(e->d_len<2 || pos>e->d_len-2) return 0;
return iw_get_ui16_e(&e->d[pos], e->endian);
}
| 6 | 1 |
atol8(const char *p, size_t char_cnt)
{
int64_t l;
int digit;
l = 0;
while (char_cnt-- > 0) {
if (*p >= '0' && *p <= '7')
digit = *p - '0';
else
break;
p++;
l <<= 3;
l |= digit;
}
return (l);
}
| 7 | 0 |
static int clie_5_attach(struct usb_serial *serial)
{
struct usb_serial_port *port;
unsigned int pipe;
int j;
/* TH55 registers 2 ports.
Communication in from the UX50/TH55 uses bulk_in_endpointAddress
from port 0. Communication out to the UX50/TH55 uses
bulk_out_endpointAddress from port 1.
Let's do a quick and dirty mapping.
*/
/* some sanity check */
if (serial->num_bulk_out < 2) {
dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
return -ENODEV;
}
/* port 0 now uses the modified endpoint Address */
port = serial->port[0];
port->bulk_out_endpointAddress =
serial->port[1]->bulk_out_endpointAddress;
pipe = usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress);
for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j)
port->write_urbs[j]->pipe = pipe;
return 0;
}
| 8 | 1 |
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
struct cifs_tcon *tcon, const struct nls_table *cp)
{
struct smb2_tree_connect_req *req;
struct smb2_tree_connect_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
int resp_buftype;
int unc_path_len;
struct TCP_Server_Info *server;
__le16 *unc_path = NULL;
cifs_dbg(FYI, "TCON\n");
if ((ses->server) && tree)
server = ses->server;
else
return -EIO;
if (tcon && tcon->bad_network_name)
return -ENOENT;
unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
if (unc_path == NULL)
return -ENOMEM;
unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
unc_path_len *= 2;
if (unc_path_len < 2) {
kfree(unc_path);
return -EINVAL;
}
rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
if (rc) {
kfree(unc_path);
return rc;
}
if (tcon == NULL) {
/* since no tcon, smb2_init can not do this, so do here */
req->hdr.SessionId = ses->Suid;
/* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
}
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field and 1 for pad */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
- 1 /* pad */ - 4 /* do not count rfc1001 len field */);
req->PathLength = cpu_to_le16(unc_path_len - 2);
iov[1].iov_base = unc_path;
iov[1].iov_len = unc_path_len;
inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;
if (rc != 0) {
if (tcon) {
cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
tcon->need_reconnect = true;
}
goto tcon_error_exit;
}
if (tcon == NULL) {
ses->ipc_tid = rsp->hdr.TreeId;
goto tcon_exit;
}
if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
cifs_dbg(FYI, "connection to disk share\n");
else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
tcon->ipc = true;
cifs_dbg(FYI, "connection to pipe share\n");
} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
tcon->print = true;
cifs_dbg(FYI, "connection to printer\n");
} else {
cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
rc = -EOPNOTSUPP;
goto tcon_error_exit;
}
tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
tcon->tidStatus = CifsGood;
tcon->need_reconnect = false;
tcon->tid = rsp->hdr.TreeId;
strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
init_copy_chunk_defaults(tcon);
if (tcon->ses->server->ops->validate_negotiate)
rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
tcon_exit:
free_rsp_buf(resp_buftype, rsp);
kfree(unc_path);
return rc;
tcon_error_exit:
if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
tcon->bad_network_name = true;
}
goto tcon_exit;
}
| 9 | 0 |
test_make_inputs (xd3_stream *stream, xoff_t *ss_out, xoff_t *ts_out)
{
usize_t ts = (mt_random (&static_mtrand) % TEST_FILE_MEAN) +
TEST_FILE_MEAN / 2;
usize_t ss = (mt_random (&static_mtrand) % TEST_FILE_MEAN) +
TEST_FILE_MEAN / 2;
uint8_t *buf = (uint8_t*) malloc (ts + ss), *sbuf = buf, *tbuf = buf + ss;
usize_t sadd = 0, sadd_max = (usize_t)(ss * TEST_ADD_RATIO);
FILE *tf = NULL, *sf = NULL;
usize_t i, j;
int ret;
if (buf == NULL) { return ENOMEM; }
if ((tf = fopen (TEST_TARGET_FILE, "w")) == NULL ||
(ss_out != NULL && (sf = fopen (TEST_SOURCE_FILE, "w")) == NULL))
{
stream->msg = "write failed";
ret = get_errno ();
goto failure;
}
if (ss_out != NULL)
{
for (i = 0; i < ss; )
{
sbuf[i++] = (uint8_t) mt_random (&static_mtrand);
}
}
/* Then modify the data to produce copies, everything not copied is
* an add. The following logic produces the TEST_ADD_RATIO. The
* variable SADD contains the number of adds so far, which should
* not exceed SADD_MAX. */
/* XPR(NT "ss = %u ts = %u\n", ss, ts); */
for (i = 0; i < ts; )
{
usize_t left = ts - i;
usize_t next = mt_exp_rand ((uint32_t) TEST_ADD_MEAN,
(uint32_t) TEST_ADD_MAX);
usize_t add_left = sadd_max - sadd;
double add_prob = (left == 0) ? 0 : (add_left / (double) left);
int do_copy;
next = min (left, next);
do_copy = (next > add_left ||
(mt_random (&static_mtrand) / \
(double)USIZE_T_MAX) >= add_prob);
if (ss_out == NULL)
{
do_copy &= (i > 0);
}
else
{
do_copy &= (ss - next) > 0;
}
if (do_copy)
{
/* Copy */
size_t offset = mt_random (&static_mtrand) % ((ss_out == NULL) ?
i :
(ss - next));
/* XPR(NT "[%u] copy %u at %u ", i, next, offset); */
for (j = 0; j < next; j += 1)
{
char c = ((ss_out == NULL) ? tbuf : sbuf)[offset + j];
/* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */
tbuf[i++] = c;
}
/* XPR(NT "\n"); */
}
else
{
/* Add */
/* XPR(NT "[%u] add %u ", i, next); */
for (j = 0; j < next; j += 1)
{
char c = (char) mt_random (&static_mtrand);
/* XPR(NT "%x%x", (c >> 4) & 0xf, c & 0xf); */
tbuf[i++] = c;
}
/* XPR(NT "\n"); */
sadd += next;
}
}
/* XPR(NT "sadd = %u max = %u\n", sadd, sadd_max); */
if ((fwrite (tbuf, 1, ts, tf) != ts) ||
(ss_out != NULL && (fwrite (sbuf, 1, ss, sf) != ss)))
{
stream->msg = "write failed";
ret = get_errno ();
goto failure;
}
if ((ret = fclose (tf)) || (ss_out != NULL && (ret = fclose (sf))))
{
stream->msg = "close failed";
ret = get_errno ();
goto failure;
}
if (ts_out) { (*ts_out) = ts; }
if (ss_out) { (*ss_out) = ss; }
failure:
free (buf);
return ret;
}
| 10 | 1 |
void hugetlb_put_quota(struct address_space *mapping, long delta)
{
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
if (sbinfo->free_blocks > -1) {
spin_lock(&sbinfo->stat_lock);
sbinfo->free_blocks += delta;
spin_unlock(&sbinfo->stat_lock);
}
}
| 11 | 0 |
static void _perf_event_reset(struct perf_event *event)
{
(void)perf_event_read(event);
local64_set(&event->count, 0);
perf_event_update_userpage(event);
}
| 12 | 0 |
SPL_METHOD(SplFileObject, hasChildren)
{
if (zend_parse_parameters_none() == FAILURE) {
return;
}
RETURN_FALSE;
} /* }}} */
| 13 | 1 |
sixel_dither_new(
sixel_dither_t /* out */ **ppdither, /* dither object to be created */
int /* in */ ncolors, /* required colors */
sixel_allocator_t /* in */ *allocator) /* allocator, null if you use
default allocator */
{
SIXELSTATUS status = SIXEL_FALSE;
size_t headsize;
size_t datasize;
size_t wholesize;
int quality_mode;
if (ppdither == NULL) {
sixel_helper_set_additional_message(
"sixel_dither_new: ppdither is null.");
status = SIXEL_BAD_ARGUMENT;
goto end;
}
if (allocator == NULL) {
status = sixel_allocator_new(&allocator, NULL, NULL, NULL, NULL);
if (SIXEL_FAILED(status)) {
*ppdither = NULL;
goto end;
}
} else {
sixel_allocator_ref(allocator);
}
if (ncolors < 0) {
ncolors = 256;
quality_mode = SIXEL_QUALITY_HIGHCOLOR;
} else {
if (ncolors > SIXEL_PALETTE_MAX) {
ncolors = 256;
} else if (ncolors < 2) {
ncolors = 2;
}
quality_mode = SIXEL_QUALITY_LOW;
}
headsize = sizeof(sixel_dither_t);
datasize = (size_t)(ncolors * 3);
wholesize = headsize + datasize;
*ppdither = (sixel_dither_t *)sixel_allocator_malloc(allocator, wholesize);
if (*ppdither == NULL) {
sixel_allocator_unref(allocator);
sixel_helper_set_additional_message(
"sixel_dither_new: sixel_allocator_malloc() failed.");
status = SIXEL_BAD_ALLOCATION;
goto end;
}
(*ppdither)->ref = 1;
(*ppdither)->palette = (unsigned char*)(*ppdither + 1);
(*ppdither)->cachetable = NULL;
(*ppdither)->reqcolors = ncolors;
(*ppdither)->ncolors = ncolors;
(*ppdither)->origcolors = (-1);
(*ppdither)->keycolor = (-1);
(*ppdither)->optimized = 0;
(*ppdither)->optimize_palette = 0;
(*ppdither)->complexion = 1;
(*ppdither)->bodyonly = 0;
(*ppdither)->method_for_largest = SIXEL_LARGE_NORM;
(*ppdither)->method_for_rep = SIXEL_REP_CENTER_BOX;
(*ppdither)->method_for_diffuse = SIXEL_DIFFUSE_FS;
(*ppdither)->quality_mode = quality_mode;
(*ppdither)->pixelformat = SIXEL_PIXELFORMAT_RGB888;
(*ppdither)->allocator = allocator;
status = SIXEL_OK;
end:
return status;
}
| 14 | 1 |
int handle_popc(u32 insn, struct pt_regs *regs)
{
u64 value;
int ret, i, rd = ((insn >> 25) & 0x1f);
int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
if (insn & 0x2000) {
maybe_flush_windows(0, 0, rd, from_kernel);
value = sign_extend_imm13(insn);
} else {
maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
value = fetch_reg(insn & 0x1f, regs);
}
for (ret = 0, i = 0; i < 16; i++) {
ret += popc_helper[value & 0xf];
value >>= 4;
}
if (rd < 16) {
if (rd)
regs->u_regs[rd] = ret;
} else {
if (test_thread_flag(TIF_32BIT)) {
struct reg_window32 __user *win32;
win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
put_user(ret, &win32->locals[rd - 16]);
} else {
struct reg_window __user *win;
win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
put_user(ret, &win->locals[rd - 16]);
}
}
advance(regs);
return 1;
}
| 15 | 0 |
avp_enum(struct l2tp_avp *avp, const u_char *pkt, int pktlen, int filldata)
{
uint16_t flags;
L2TP_SUBR_ASSERT(pktlen >= 6);
if (pktlen < 6)
return -1;
GETSHORT(flags, pkt);
avp->is_mandatory = ((flags & 0x8000) != 0)? 1 : 0;
avp->is_hidden = ((flags & 0x4000) != 0)? 1 : 0;
avp->length = flags & 0x03ff;
GETSHORT(avp->vendor_id, pkt);
avp->attr_type = *pkt << 8;
avp->attr_type |= *(pkt + 1);
pkt += 2;
if (avp->length < 6 || avp->length > pktlen)
return -1;
if (avp->length > 6 && filldata != 0)
memcpy(avp->attr_value, pkt, avp->length - 6);
return avp->length;
}
| 16 | 0 |
sysObjectID_handler(snmp_varbind_t *varbind, snmp_oid_t *oid)
{
OID(sysObjectID_oid, 1, 3, 6, 1, 4, 1, 54352);
snmp_api_set_oid(varbind, oid, &sysObjectID_oid);
}
| 17 | 0 |
vhost_scsi_make_tpg(struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
struct vhost_scsi_tport *tport = container_of(wwn,
struct vhost_scsi_tport, tport_wwn);
struct vhost_scsi_tpg *tpg;
u16 tpgt;
int ret;
if (strstr(name, "tpgt_") != name)
return ERR_PTR(-EINVAL);
if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
return ERR_PTR(-EINVAL);
tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
if (!tpg) {
pr_err("Unable to allocate struct vhost_scsi_tpg");
return ERR_PTR(-ENOMEM);
}
mutex_init(&tpg->tv_tpg_mutex);
INIT_LIST_HEAD(&tpg->tv_tpg_list);
tpg->tport = tport;
tpg->tport_tpgt = tpgt;
ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
}
mutex_lock(&vhost_scsi_mutex);
list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
mutex_unlock(&vhost_scsi_mutex);
return &tpg->se_tpg;
}
| 18 | 1 |
static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
unsigned char max_level = 0;
int unix_sock_count = 0;
for (i = scm->fp->count - 1; i >= 0; i--) {
struct sock *sk = unix_get_socket(scm->fp->fp[i]);
if (sk) {
unix_sock_count++;
max_level = max(max_level,
unix_sk(sk)->recursion_level);
}
}
if (unlikely(max_level > MAX_RECURSION_LEVEL))
return -ETOOMANYREFS;
/*
* Need to duplicate file references for the sake of garbage
* collection. Otherwise a socket in the fps might become a
* candidate for GC while the skb is not yet queued.
*/
UNIXCB(skb).fp = scm_fp_dup(scm->fp);
if (!UNIXCB(skb).fp)
return -ENOMEM;
if (unix_sock_count) {
for (i = scm->fp->count - 1; i >= 0; i--)
unix_inflight(scm->fp->fp[i]);
}
return max_level;
}
| 19 | 1 |
LIBOPENMPT_MODPLUG_API unsigned int ModPlug_SampleName(ModPlugFile* file, unsigned int qual, char* buff)
{
const char* str;
unsigned int retval;
size_t tmpretval;
if(!file) return 0;
str = openmpt_module_get_sample_name(file->mod,qual-1);
if(!str){
if(buff){
*buff = '\0';
}
return 0;
}
tmpretval = strlen(str);
if(tmpretval>=INT_MAX){
tmpretval = INT_MAX-1;
}
retval = (int)tmpretval;
if(buff){
memcpy(buff,str,retval+1);
buff[retval] = '\0';
}
openmpt_free_string(str);
return retval;
}
| 20 | 1 |
Status GetTensorArray(OpKernelContext* ctx, TensorArray** tensor_array) {
string container;
string ta_handle;
if (ctx->input_dtype(0) != DT_RESOURCE) {
TF_RETURN_IF_ERROR(GetHandle(ctx, &container, &ta_handle));
ResourceMgr* rm = ctx->resource_manager();
if (rm == nullptr) return errors::Internal("No resource manager.");
TF_RETURN_IF_ERROR(
ctx->step_container()->Lookup(rm, container + ta_handle, tensor_array));
return OkStatus();
} else {
return LookupResource(ctx, HandleFromInput(ctx, 0), tensor_array);
}
}
| 21 | 1 |
static struct vm_area_struct *vma_to_resize(unsigned long addr,
unsigned long old_len, unsigned long new_len, unsigned long *p)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma = find_vma(mm, addr);
if (!vma || vma->vm_start > addr)
goto Efault;
if (is_vm_hugetlb_page(vma))
goto Einval;
/* We can't remap across vm area boundaries */
if (old_len > vma->vm_end - addr)
goto Efault;
if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
if (new_len > old_len)
goto Efault;
}
if (vma->vm_flags & VM_LOCKED) {
unsigned long locked, lock_limit;
locked = mm->locked_vm << PAGE_SHIFT;
lock_limit = rlimit(RLIMIT_MEMLOCK);
locked += new_len - old_len;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
goto Eagain;
}
if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
goto Enomem;
if (vma->vm_flags & VM_ACCOUNT) {
unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
if (security_vm_enough_memory(charged))
goto Efault;
*p = charged;
}
return vma;
Efault: /* very odd choice for most of the cases, but... */
return ERR_PTR(-EFAULT);
Einval:
return ERR_PTR(-EINVAL);
Enomem:
return ERR_PTR(-ENOMEM);
Eagain:
return ERR_PTR(-EAGAIN);
}
| 22 | 0 |
static int bmp_getint32(jas_stream_t *in, int_fast32_t *val)
{
int n;
uint_fast32_t v;
int c;
for (n = 4, v = 0;;) {
if ((c = jas_stream_getc(in)) == EOF) {
return -1;
}
v |= (JAS_CAST(uint_fast32_t, c) << 24);
if (--n <= 0) {
break;
}
v >>= 8;
}
if (val) {
*val = v;
}
return 0;
}
| 23 | 1 |
uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
uint32_t val;
k->get_config(vdev, vdev->config);
if (addr > (vdev->config_len - sizeof(val)))
return (uint32_t)-1;
val = ldl_p(vdev->config + addr);
return val;
}
| 24 | 1 |
static VALUE read_memory(VALUE klass, VALUE content)
{
xmlSchemaPtr schema;
xmlSchemaParserCtxtPtr ctx = xmlSchemaNewMemParserCtxt(
(const char *)StringValuePtr(content),
(int)RSTRING_LEN(content)
);
VALUE rb_schema;
VALUE errors = rb_ary_new();
xmlSetStructuredErrorFunc((void *)errors, Nokogiri_error_array_pusher);
#ifdef HAVE_XMLSCHEMASETPARSERSTRUCTUREDERRORS
xmlSchemaSetParserStructuredErrors(
ctx,
Nokogiri_error_array_pusher,
(void *)errors
);
#endif
schema = xmlSchemaParse(ctx);
xmlSetStructuredErrorFunc(NULL, NULL);
xmlSchemaFreeParserCtxt(ctx);
if(NULL == schema) {
xmlErrorPtr error = xmlGetLastError();
if(error)
Nokogiri_error_raise(NULL, error);
else
rb_raise(rb_eRuntimeError, "Could not parse document");
return Qnil;
}
rb_schema = Data_Wrap_Struct(klass, 0, dealloc, schema);
rb_iv_set(rb_schema, "@errors", errors);
return rb_schema;
}
| 25 | 1 |
static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
get_block_t *get_block;
/*
* Fallback to buffered I/O if we see an inode without
* extents.
*/
if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
return 0;
/* Fallback to buffered I/O if we do not support append dio. */
if (iocb->ki_pos + iter->count > i_size_read(inode) &&
!ocfs2_supports_append_dio(osb))
return 0;
if (iov_iter_rw(iter) == READ)
get_block = ocfs2_get_block;
else
get_block = ocfs2_dio_get_block;
return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
iter, get_block,
ocfs2_dio_end_io, NULL, 0);
}
| 26 | 0 |
static bool ParseSampler(Sampler *sampler, std::string *err, const json &o,
bool store_original_json_for_extras_and_extensions) {
ParseStringProperty(&sampler->name, err, o, "name", false);
int minFilter = -1;
int magFilter = -1;
int wrapS = TINYGLTF_TEXTURE_WRAP_REPEAT;
int wrapT = TINYGLTF_TEXTURE_WRAP_REPEAT;
// int wrapR = TINYGLTF_TEXTURE_WRAP_REPEAT;
ParseIntegerProperty(&minFilter, err, o, "minFilter", false);
ParseIntegerProperty(&magFilter, err, o, "magFilter", false);
ParseIntegerProperty(&wrapS, err, o, "wrapS", false);
ParseIntegerProperty(&wrapT, err, o, "wrapT", false);
// ParseIntegerProperty(&wrapR, err, o, "wrapR", false); // tinygltf
// extension
// TODO(syoyo): Check the value is an allowed one.
// (e.g. we allow 9728(NEAREST), but don't allow 9727)
sampler->minFilter = minFilter;
sampler->magFilter = magFilter;
sampler->wrapS = wrapS;
sampler->wrapT = wrapT;
// sampler->wrapR = wrapR;
ParseExtensionsProperty(&(sampler->extensions), err, o);
ParseExtrasProperty(&(sampler->extras), o);
if (store_original_json_for_extras_and_extensions) {
{
json_const_iterator it;
if (FindMember(o, "extensions", it)) {
sampler->extensions_json_string = JsonToString(GetValue(it));
}
}
{
json_const_iterator it;
if (FindMember(o, "extras", it)) {
sampler->extras_json_string = JsonToString(GetValue(it));
}
}
}
return true;
}
| 27 | 0 |
apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
ap_input_mode_t mode, apr_read_type_e block,
apr_off_t readbytes)
{
core_server_config *conf;
apr_bucket *e;
http_ctx_t *ctx = f->ctx;
apr_status_t rv;
apr_off_t totalread;
int again;
conf = (core_server_config *)
ap_get_module_config(f->r->server->module_config, &core_module);
/* just get out of the way of things we don't want. */
if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) {
return ap_get_brigade(f->next, b, mode, block, readbytes);
}
if (!ctx) {
const char *tenc, *lenp;
f->ctx = ctx = apr_pcalloc(f->r->pool, sizeof(*ctx));
ctx->state = BODY_NONE;
/* LimitRequestBody does not apply to proxied responses.
* Consider implementing this check in its own filter.
* Would adding a directive to limit the size of proxied
* responses be useful?
*/
if (!f->r->proxyreq) {
ctx->limit = ap_get_limit_req_body(f->r);
}
else {
ctx->limit = 0;
}
tenc = apr_table_get(f->r->headers_in, "Transfer-Encoding");
lenp = apr_table_get(f->r->headers_in, "Content-Length");
if (tenc) {
if (strcasecmp(tenc, "chunked") == 0 /* fast path */
|| ap_find_last_token(f->r->pool, tenc, "chunked")) {
ctx->state = BODY_CHUNK;
}
else if (f->r->proxyreq == PROXYREQ_RESPONSE) {
/* http://tools.ietf.org/html/draft-ietf-httpbis-p1-messaging-23
* Section 3.3.3.3: "If a Transfer-Encoding header field is
* present in a response and the chunked transfer coding is not
* the final encoding, the message body length is determined by
* reading the connection until it is closed by the server."
*/
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(02555)
"Unknown Transfer-Encoding: %s; "
"using read-until-close", tenc);
tenc = NULL;
}
else {
/* Something that isn't a HTTP request, unless some future
* edition defines new transfer encodings, is unsupported.
*/
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01585)
"Unknown Transfer-Encoding: %s", tenc);
return APR_EGENERAL;
}
lenp = NULL;
}
if (lenp) {
char *endstr;
ctx->state = BODY_LENGTH;
/* Protects against over/underflow, non-digit chars in the
* string (excluding leading space) (the endstr checks)
* and a negative number. */
if (apr_strtoff(&ctx->remaining, lenp, &endstr, 10)
|| endstr == lenp || *endstr || ctx->remaining < 0) {
ctx->remaining = 0;
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01587)
"Invalid Content-Length");
return APR_EINVAL;
}
/* If we have a limit in effect and we know the C-L ahead of
* time, stop it here if it is invalid.
*/
if (ctx->limit && ctx->limit < ctx->remaining) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01588)
"Requested content-length of %" APR_OFF_T_FMT
" is larger than the configured limit"
" of %" APR_OFF_T_FMT, ctx->remaining, ctx->limit);
return APR_ENOSPC;
}
}
/* If we don't have a request entity indicated by the headers, EOS.
* (BODY_NONE is a valid intermediate state due to trailers,
* but it isn't a valid starting state.)
*
* RFC 2616 Section 4.4 note 5 states that connection-close
* is invalid for a request entity - request bodies must be
* denoted by C-L or T-E: chunked.
*
* Note that since the proxy uses this filter to handle the
* proxied *response*, proxy responses MUST be exempt.
*/
if (ctx->state == BODY_NONE && f->r->proxyreq != PROXYREQ_RESPONSE) {
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
ctx->eos_sent = 1;
return APR_SUCCESS;
}
/* Since we're about to read data, send 100-Continue if needed.
* Only valid on chunked and C-L bodies where the C-L is > 0. */
if ((ctx->state == BODY_CHUNK
|| (ctx->state == BODY_LENGTH && ctx->remaining > 0))
&& f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1)
&& !(f->r->eos_sent || f->r->bytes_sent)) {
if (!ap_is_HTTP_SUCCESS(f->r->status)) {
ctx->state = BODY_NONE;
ctx->eos_sent = 1;
}
else {
char *tmp;
int len;
apr_bucket_brigade *bb;
bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
/* if we send an interim response, we're no longer
* in a state of expecting one.
*/
f->r->expecting_100 = 0;
tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL " ",
ap_get_status_line(HTTP_CONTINUE), CRLF CRLF, NULL);
len = strlen(tmp);
ap_xlate_proto_to_ascii(tmp, len);
e = apr_bucket_pool_create(tmp, len, f->r->pool,
f->c->bucket_alloc);
APR_BRIGADE_INSERT_HEAD(bb, e);
e = apr_bucket_flush_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(bb, e);
rv = ap_pass_brigade(f->c->output_filters, bb);
apr_brigade_cleanup(bb);
if (rv != APR_SUCCESS) {
return AP_FILTER_ERROR;
}
}
}
}
/* sanity check in case we're read twice */
if (ctx->eos_sent) {
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
return APR_SUCCESS;
}
do {
apr_brigade_cleanup(b);
again = 0; /* until further notice */
/* read and handle the brigade */
switch (ctx->state) {
case BODY_CHUNK:
case BODY_CHUNK_PART:
case BODY_CHUNK_EXT:
case BODY_CHUNK_LF:
case BODY_CHUNK_END:
case BODY_CHUNK_END_LF: {
rv = ap_get_brigade(f->next, b, AP_MODE_GETLINE, block, 0);
/* for timeout */
if (block == APR_NONBLOCK_READ
&& ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
|| (APR_STATUS_IS_EAGAIN(rv)))) {
return APR_EAGAIN;
}
if (rv == APR_EOF) {
return APR_INCOMPLETE;
}
if (rv != APR_SUCCESS) {
return rv;
}
e = APR_BRIGADE_FIRST(b);
while (e != APR_BRIGADE_SENTINEL(b)) {
const char *buffer;
apr_size_t len;
if (!APR_BUCKET_IS_METADATA(e)) {
rv = apr_bucket_read(e, &buffer, &len, APR_BLOCK_READ);
if (rv == APR_SUCCESS) {
rv = parse_chunk_size(ctx, buffer, len,
f->r->server->limit_req_fieldsize);
}
if (rv != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, rv, f->r, APLOGNO(01590)
"Error reading chunk %s ",
(APR_ENOSPC == rv) ? "(overflow)" : "");
return rv;
}
}
apr_bucket_delete(e);
e = APR_BRIGADE_FIRST(b);
}
again = 1; /* come around again */
if (ctx->state == BODY_CHUNK_TRAILER) {
/* Treat UNSET as DISABLE - trailers aren't merged by default */
return read_chunked_trailers(ctx, f, b,
conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE);
}
break;
}
case BODY_NONE:
case BODY_LENGTH:
case BODY_CHUNK_DATA: {
/* Ensure that the caller can not go over our boundary point. */
if (ctx->state != BODY_NONE && ctx->remaining < readbytes) {
readbytes = ctx->remaining;
}
if (readbytes > 0) {
rv = ap_get_brigade(f->next, b, mode, block, readbytes);
/* for timeout */
if (block == APR_NONBLOCK_READ
&& ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
|| (APR_STATUS_IS_EAGAIN(rv)))) {
return APR_EAGAIN;
}
if (rv == APR_EOF && ctx->state != BODY_NONE
&& ctx->remaining > 0) {
return APR_INCOMPLETE;
}
if (rv != APR_SUCCESS) {
return rv;
}
/* How many bytes did we just read? */
apr_brigade_length(b, 0, &totalread);
/* If this happens, we have a bucket of unknown length. Die because
* it means our assumptions have changed. */
AP_DEBUG_ASSERT(totalread >= 0);
if (ctx->state != BODY_NONE) {
ctx->remaining -= totalread;
if (ctx->remaining > 0) {
e = APR_BRIGADE_LAST(b);
if (APR_BUCKET_IS_EOS(e)) {
apr_bucket_delete(e);
return APR_INCOMPLETE;
}
}
else if (ctx->state == BODY_CHUNK_DATA) {
/* next chunk please */
ctx->state = BODY_CHUNK_END;
ctx->chunk_used = 0;
}
}
}
/* If we have no more bytes remaining on a C-L request,
* save the caller a round trip to discover EOS.
*/
if (ctx->state == BODY_LENGTH && ctx->remaining == 0) {
e = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(b, e);
ctx->eos_sent = 1;
}
/* We have a limit in effect. */
if (ctx->limit) {
/* FIXME: Note that we might get slightly confused on chunked inputs
* as we'd need to compensate for the chunk lengths which may not
* really count. This seems to be up for interpretation. */
ctx->limit_used += totalread;
if (ctx->limit < ctx->limit_used) {
ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, f->r, APLOGNO(01591)
"Read content-length of %" APR_OFF_T_FMT
" is larger than the configured limit"
" of %" APR_OFF_T_FMT, ctx->limit_used, ctx->limit);
return APR_ENOSPC;
}
}
break;
}
case BODY_CHUNK_TRAILER: {
rv = ap_get_brigade(f->next, b, mode, block, readbytes);
/* for timeout */
if (block == APR_NONBLOCK_READ
&& ((rv == APR_SUCCESS && APR_BRIGADE_EMPTY(b))
|| (APR_STATUS_IS_EAGAIN(rv)))) {
return APR_EAGAIN;
}
if (rv != APR_SUCCESS) {
return rv;
}
break;
}
default: {
/* Should not happen */
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r, APLOGNO(02901)
"Unexpected body state (%i)", (int)ctx->state);
return APR_EGENERAL;
}
}
} while (again);
return APR_SUCCESS;
}
| 28 | 0 |
DiscoveredParticipantInfo(AuthenticationStatus auth_status) :
identity_handle_(nullptr), handshake_handle_(nullptr),
auth_status_(auth_status), last_sequence_number_(1)
{}
| 29 | 0 |
bool IsIdentityConsumingSwitch(const MutableGraphView& graph,
const NodeDef& node) {
if ((IsIdentity(node) || IsIdentityNSingleInput(node)) &&
node.input_size() > 0) {
TensorId tensor_id = ParseTensorName(node.input(0));
if (IsTensorIdControlling(tensor_id)) {
return false;
}
NodeDef* input_node = graph.GetNode(tensor_id.node());
if (input_node == nullptr) {
return false;
}
return IsSwitch(*input_node);
}
return false;
}
| 30 | 1 |
l2tp_framing_cap_print(netdissect_options *ndo, const u_char *dat)
{
const uint32_t *ptr = (const uint32_t *)dat;
if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_CAP_ASYNC_MASK) {
ND_PRINT((ndo, "A"));
}
if (EXTRACT_32BITS(ptr) & L2TP_FRAMING_CAP_SYNC_MASK) {
ND_PRINT((ndo, "S"));
}
}
| 31 | 1 |
find_file (const char *currpath, grub_fshelp_node_t currroot,
grub_fshelp_node_t *currfound,
struct grub_fshelp_find_file_closure *c)
{
#ifndef _MSC_VER
char fpath[grub_strlen (currpath) + 1];
#else
char *fpath = grub_malloc (grub_strlen (currpath) + 1);
#endif
char *name = fpath;
char *next;
enum grub_fshelp_filetype type = GRUB_FSHELP_DIR;
grub_fshelp_node_t currnode = currroot;
grub_fshelp_node_t oldnode = currroot;
c->currroot = currroot;
grub_strncpy (fpath, currpath, grub_strlen (currpath) + 1);
/* Remove all leading slashes. */
while (*name == '/')
name++;
if (! *name)
{
*currfound = currnode;
return 0;
}
for (;;)
{
int found;
struct find_file_closure cc;
/* Extract the actual part from the pathname. */
next = grub_strchr (name, '/');
if (next)
{
/* Remove all leading slashes. */
while (*next == '/')
*(next++) = '\0';
}
/* At this point it is expected that the current node is a
directory, check if this is true. */
if (type != GRUB_FSHELP_DIR)
{
free_node (currnode, c);
return grub_error (GRUB_ERR_BAD_FILE_TYPE, "not a directory");
}
cc.name = name;
cc.type = &type;
cc.oldnode = &oldnode;
cc.currnode = &currnode;
/* Iterate over the directory. */
found = c->iterate_dir (currnode, iterate, &cc);
if (! found)
{
if (grub_errno)
return grub_errno;
break;
}
/* Read in the symlink and follow it. */
if (type == GRUB_FSHELP_SYMLINK)
{
char *symlink;
/* Test if the symlink does not loop. */
if (++(c->symlinknest) == 8)
{
free_node (currnode, c);
free_node (oldnode, c);
return grub_error (GRUB_ERR_SYMLINK_LOOP,
"too deep nesting of symlinks");
}
symlink = c->read_symlink (currnode);
free_node (currnode, c);
if (!symlink)
{
free_node (oldnode, c);
return grub_errno;
}
/* The symlink is an absolute path, go back to the root inode. */
if (symlink[0] == '/')
{
free_node (oldnode, c);
oldnode = c->rootnode;
}
/* Lookup the node the symlink points to. */
find_file (symlink, oldnode, &currnode, c);
type = c->foundtype;
grub_free (symlink);
if (grub_errno)
{
free_node (oldnode, c);
return grub_errno;
}
}
free_node (oldnode, c);
/* Found the node! */
if (! next || *next == '\0')
{
*currfound = currnode;
c->foundtype = type;
return 0;
}
name = next;
}
return grub_error (GRUB_ERR_FILE_NOT_FOUND, "file not found");
}
| 32 | 0 |
GF_Err abst_box_read(GF_Box *s, GF_BitStream *bs)
{
GF_AdobeBootstrapInfoBox *ptr = (GF_AdobeBootstrapInfoBox *)s;
int i;
u32 tmp_strsize;
char *tmp_str;
GF_Err e;
ISOM_DECREASE_SIZE(ptr, 25)
ptr->bootstrapinfo_version = gf_bs_read_u32(bs);
ptr->profile = gf_bs_read_int(bs, 2);
ptr->live = gf_bs_read_int(bs, 1);
ptr->update = gf_bs_read_int(bs, 1);
ptr->reserved = gf_bs_read_int(bs, 4);
ptr->time_scale = gf_bs_read_u32(bs);
ptr->current_media_time = gf_bs_read_u64(bs);
ptr->smpte_time_code_offset = gf_bs_read_u64(bs);
i=0;
if (ptr->size<8) return GF_ISOM_INVALID_FILE;
tmp_strsize =(u32)ptr->size-8;
tmp_str = gf_malloc(sizeof(char)*tmp_strsize);
if (!tmp_str) return GF_OUT_OF_MEM;
memset(tmp_str, 0, sizeof(char)*tmp_strsize);
while (tmp_strsize) {
ISOM_DECREASE_SIZE(ptr, 1)
tmp_str[i] = gf_bs_read_u8(bs);
tmp_strsize--;
if (!tmp_str[i])
break;
i++;
}
if (i) {
ptr->movie_identifier = gf_strdup(tmp_str);
}
ISOM_DECREASE_SIZE(ptr, 1)
ptr->server_entry_count = gf_bs_read_u8(bs);
for (i=0; i<ptr->server_entry_count; i++) {
int j=0;
tmp_strsize=(u32)ptr->size;
while (tmp_strsize) {
ISOM_DECREASE_SIZE(ptr, 1)
tmp_str[j] = gf_bs_read_u8(bs);
tmp_strsize--;
if (!tmp_str[j])
break;
j++;
}
if (j) {
gf_list_insert(ptr->server_entry_table, gf_strdup(tmp_str), i);
}
}
ISOM_DECREASE_SIZE(ptr, 1)
ptr->quality_entry_count = gf_bs_read_u8(bs);
for (i=0; i<ptr->quality_entry_count; i++) {
int j=0;
tmp_strsize=(u32)ptr->size;
while (tmp_strsize) {
ISOM_DECREASE_SIZE(ptr, 1)
tmp_str[j] = gf_bs_read_u8(bs);
tmp_strsize--;
if (!tmp_str[j])
break;
j++;
}
if (j) {
gf_list_insert(ptr->quality_entry_table, gf_strdup(tmp_str), i);
}
}
i=0;
tmp_strsize=(u32)ptr->size;
while (tmp_strsize) {
ISOM_DECREASE_SIZE(ptr, 1)
tmp_str[i] = gf_bs_read_u8(bs);
tmp_strsize--;
if (!tmp_str[i])
break;
i++;
}
if (i) {
ptr->drm_data = gf_strdup(tmp_str);
}
i=0;
tmp_strsize=(u32)ptr->size;
while (tmp_strsize) {
ISOM_DECREASE_SIZE(ptr, 1)
tmp_str[i] = gf_bs_read_u8(bs);
tmp_strsize--;
if (!tmp_str[i])
break;
i++;
}
if (i) {
ptr->meta_data = gf_strdup(tmp_str);
}
ISOM_DECREASE_SIZE(ptr, 1)
ptr->segment_run_table_count = gf_bs_read_u8(bs);
for (i=0; i<ptr->segment_run_table_count; i++) {
GF_AdobeSegmentRunTableBox *asrt = NULL;
e = gf_isom_box_parse((GF_Box **)&asrt, bs);
if (e) {
if (asrt) gf_isom_box_del((GF_Box*)asrt);
gf_free(tmp_str);
return e;
}
gf_list_add(ptr->segment_run_table_entries, asrt);
}
ISOM_DECREASE_SIZE(ptr, 1)
ptr->fragment_run_table_count = gf_bs_read_u8(bs);
for (i=0; i<ptr->fragment_run_table_count; i++) {
GF_AdobeFragmentRunTableBox *afrt = NULL;
e = gf_isom_box_parse((GF_Box **)&afrt, bs);
if (e) {
if (afrt) gf_isom_box_del((GF_Box*)afrt);
gf_free(tmp_str);
return e;
}
gf_list_add(ptr->fragment_run_table_entries, afrt);
}
gf_free(tmp_str);
return GF_OK;
}
| 33 | 0 |
void Compute(OpKernelContext* context) override {
const Tensor& tensor_in = context->input(0);
const Tensor& tensor_out = context->input(1);
const Tensor& out_backprop = context->input(2);
OP_REQUIRES(context, tensor_in.dims() == 5,
errors::InvalidArgument("tensor_in must be 5-dimensional"));
OP_REQUIRES(context, tensor_out.dims() == 5,
errors::InvalidArgument("tensor_out must be 5-dimensional"));
OP_REQUIRES(context, out_backprop.dims() == 5,
errors::InvalidArgument("out_backprop must be 5-dimensional"));
const TensorShape& output_shape = tensor_in.shape();
Tensor* input_backprop;
OP_REQUIRES_OK(context,
context->allocate_output(0, output_shape, &input_backprop));
std::array<int64_t, 3> input_size{
{GetTensorDim(output_shape, data_format_, '2'),
GetTensorDim(output_shape, data_format_, '1'),
GetTensorDim(output_shape, data_format_, '0')}};
std::array<int64_t, 3> window{{GetTensorDim(ksize_, data_format_, '2'),
GetTensorDim(ksize_, data_format_, '1'),
GetTensorDim(ksize_, data_format_, '0')}};
std::array<int64_t, 3> stride{{GetTensorDim(stride_, data_format_, '2'),
GetTensorDim(stride_, data_format_, '1'),
GetTensorDim(stride_, data_format_, '0')}};
std::array<int64_t, 3> out, padding;
OP_REQUIRES_OK(context, Get3dOutputSize(input_size, window, stride,
padding_, &out, &padding));
const int64_t depth = GetTensorDim(tensor_in, data_format_, 'C');
const int64_t in_batch = GetTensorDim(tensor_in, data_format_, 'N');
TensorShape out_shape = ShapeFromFormat(data_format_, in_batch,
{{out[2], out[1], out[0]}}, depth);
OP_REQUIRES(
context, tensor_out.shape() == out_shape,
errors::InvalidArgument("Expected orig_output shape to be ", out_shape,
", but got ", tensor_out.shape()));
OP_REQUIRES(context, out_backprop.shape() == out_shape,
errors::InvalidArgument("Expected grad shape to be ", out_shape,
", but got ", out_backprop.shape()));
LaunchMaxPooling3dGradOp<Device, T>::launch(
context, tensor_in, tensor_out, out_backprop, window, stride, out,
padding, data_format_, input_backprop);
}
| 34 | 0 |
unsigned int GetUVarBE(const unsigned int& nPos, const unsigned int& nSize, bool *pbSuccess)
{
//*pbSuccess = true;
if ( m_nLen < nSize || nPos > (m_nLen - nSize) )
{
*pbSuccess = false;
return 0;
}
unsigned int nRes = 0;
for ( int nIndex = 0; nIndex < nSize; ++nIndex )
nRes = (nRes << 8) + m_sFile[nPos + nIndex];
return nRes;
}
| 35 | 0 |
static void ntlm_free_message_fields_buffer(NTLM_MESSAGE_FIELDS* fields)
{
if (fields)
{
if (fields->Buffer)
{
free(fields->Buffer);
fields->Len = 0;
fields->MaxLen = 0;
fields->Buffer = NULL;
fields->BufferOffset = 0;
}
}
}
| 36 | 0 |
pf_test_state_icmp(struct pf_pdesc *pd, struct pf_state **stp,
u_short *reason)
{
u_int16_t virtual_id, virtual_type;
u_int8_t icmptype, icmpcode;
int icmp_dir, iidx, ret, copyback = 0;
struct pf_state_key_cmp key;
switch (pd->proto) {
case IPPROTO_ICMP:
icmptype = pd->hdr.icmp.icmp_type;
icmpcode = pd->hdr.icmp.icmp_code;
break;
#ifdef INET6
case IPPROTO_ICMPV6:
icmptype = pd->hdr.icmp6.icmp6_type;
icmpcode = pd->hdr.icmp6.icmp6_code;
break;
#endif /* INET6 */
default:
panic("unhandled proto %d", pd->proto);
}
if (pf_icmp_mapping(pd, icmptype, &icmp_dir, &virtual_id,
&virtual_type) == 0) {
/*
* ICMP query/reply message not related to a TCP/UDP packet.
* Search for an ICMP state.
*/
ret = pf_icmp_state_lookup(pd, &key, stp,
virtual_id, virtual_type, icmp_dir, &iidx,
0, 0);
/* IPv6? try matching a multicast address */
if (ret == PF_DROP && pd->af == AF_INET6 && icmp_dir == PF_OUT)
ret = pf_icmp_state_lookup(pd, &key, stp, virtual_id,
virtual_type, icmp_dir, &iidx, 1, 0);
if (ret >= 0)
return (ret);
(*stp)->expire = getuptime();
pf_update_state_timeout(*stp, PFTM_ICMP_ERROR_REPLY);
/* translate source/destination address, if necessary */
if ((*stp)->key[PF_SK_WIRE] != (*stp)->key[PF_SK_STACK]) {
struct pf_state_key *nk;
int afto, sidx, didx;
if (PF_REVERSED_KEY((*stp)->key, pd->af))
nk = (*stp)->key[pd->sidx];
else
nk = (*stp)->key[pd->didx];
afto = pd->af != nk->af;
sidx = afto ? pd->didx : pd->sidx;
didx = afto ? pd->sidx : pd->didx;
iidx = afto ? !iidx : iidx;
#ifdef INET6
if (afto) {
pf_addrcpy(&pd->nsaddr, &nk->addr[sidx],
nk->af);
pf_addrcpy(&pd->ndaddr, &nk->addr[didx],
nk->af);
pd->naf = nk->af;
}
#endif /* INET6 */
if (!afto) {
pf_translate_a(pd, pd->src, &nk->addr[sidx]);
pf_translate_a(pd, pd->dst, &nk->addr[didx]);
}
if (pd->rdomain != nk->rdomain)
pd->destchg = 1;
if (!afto && PF_ANEQ(pd->dst,
&nk->addr[didx], pd->af))
pd->destchg = 1;
pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
switch (pd->af) {
case AF_INET:
#ifdef INET6
if (afto) {
if (pf_translate_icmp_af(pd, AF_INET6,
&pd->hdr.icmp))
return (PF_DROP);
pd->proto = IPPROTO_ICMPV6;
}
#endif /* INET6 */
pf_patch_16(pd,
&pd->hdr.icmp.icmp_id, nk->port[iidx]);
m_copyback(pd->m, pd->off, ICMP_MINLEN,
&pd->hdr.icmp, M_NOWAIT);
copyback = 1;
break;
#ifdef INET6
case AF_INET6:
if (afto) {
if (pf_translate_icmp_af(pd, AF_INET,
&pd->hdr.icmp6))
return (PF_DROP);
pd->proto = IPPROTO_ICMP;
}
pf_patch_16(pd,
&pd->hdr.icmp6.icmp6_id, nk->port[iidx]);
m_copyback(pd->m, pd->off,
sizeof(struct icmp6_hdr), &pd->hdr.icmp6,
M_NOWAIT);
copyback = 1;
break;
#endif /* INET6 */
}
#ifdef INET6
if (afto)
return (PF_AFRT);
#endif /* INET6 */
}
} else {
/*
* ICMP error message in response to a TCP/UDP packet.
* Extract the inner TCP/UDP header and search for that state.
*/
struct pf_pdesc pd2;
struct ip h2;
#ifdef INET6
struct ip6_hdr h2_6;
#endif /* INET6 */
int ipoff2;
/* Initialize pd2 fields valid for both packets with pd. */
memset(&pd2, 0, sizeof(pd2));
pd2.af = pd->af;
pd2.dir = pd->dir;
pd2.kif = pd->kif;
pd2.m = pd->m;
pd2.rdomain = pd->rdomain;
/* Payload packet is from the opposite direction. */
pd2.sidx = (pd2.dir == PF_IN) ? 1 : 0;
pd2.didx = (pd2.dir == PF_IN) ? 0 : 1;
switch (pd->af) {
case AF_INET:
/* offset of h2 in mbuf chain */
ipoff2 = pd->off + ICMP_MINLEN;
if (!pf_pull_hdr(pd2.m, ipoff2, &h2, sizeof(h2),
reason, pd2.af)) {
DPFPRINTF(LOG_NOTICE,
"ICMP error message too short (ip)");
return (PF_DROP);
}
/*
* ICMP error messages don't refer to non-first
* fragments
*/
if (h2.ip_off & htons(IP_OFFMASK)) {
REASON_SET(reason, PFRES_FRAG);
return (PF_DROP);
}
/* offset of protocol header that follows h2 */
pd2.off = ipoff2;
if (pf_walk_header(&pd2, &h2, reason) != PF_PASS)
return (PF_DROP);
pd2.tot_len = ntohs(h2.ip_len);
pd2.src = (struct pf_addr *)&h2.ip_src;
pd2.dst = (struct pf_addr *)&h2.ip_dst;
break;
#ifdef INET6
case AF_INET6:
ipoff2 = pd->off + sizeof(struct icmp6_hdr);
if (!pf_pull_hdr(pd2.m, ipoff2, &h2_6, sizeof(h2_6),
reason, pd2.af)) {
DPFPRINTF(LOG_NOTICE,
"ICMP error message too short (ip6)");
return (PF_DROP);
}
pd2.off = ipoff2;
if (pf_walk_header6(&pd2, &h2_6, reason) != PF_PASS)
return (PF_DROP);
pd2.tot_len = ntohs(h2_6.ip6_plen) +
sizeof(struct ip6_hdr);
pd2.src = (struct pf_addr *)&h2_6.ip6_src;
pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
break;
#endif /* INET6 */
default:
unhandled_af(pd->af);
}
if (PF_ANEQ(pd->dst, pd2.src, pd->af)) {
if (pf_status.debug >= LOG_NOTICE) {
log(LOG_NOTICE,
"pf: BAD ICMP %d:%d outer dst: ",
icmptype, icmpcode);
pf_print_host(pd->src, 0, pd->af);
addlog(" -> ");
pf_print_host(pd->dst, 0, pd->af);
addlog(" inner src: ");
pf_print_host(pd2.src, 0, pd2.af);
addlog(" -> ");
pf_print_host(pd2.dst, 0, pd2.af);
addlog("\n");
}
REASON_SET(reason, PFRES_BADSTATE);
return (PF_DROP);
}
switch (pd2.proto) {
case IPPROTO_TCP: {
struct tcphdr *th = &pd2.hdr.tcp;
u_int32_t seq;
struct pf_state_peer *src, *dst;
u_int8_t dws;
int action;
/*
* Only the first 8 bytes of the TCP header can be
* expected. Don't access any TCP header fields after
* th_seq, an ackskew test is not possible.
*/
if (!pf_pull_hdr(pd2.m, pd2.off, th, 8, reason,
pd2.af)) {
DPFPRINTF(LOG_NOTICE,
"ICMP error message too short (tcp)");
return (PF_DROP);
}
key.af = pd2.af;
key.proto = IPPROTO_TCP;
key.rdomain = pd2.rdomain;
pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af);
key.port[pd2.sidx] = th->th_sport;
key.port[pd2.didx] = th->th_dport;
key.hash = pf_pkt_hash(pd2.af, pd2.proto,
pd2.src, pd2.dst, th->th_sport, th->th_dport);
action = pf_find_state(&pd2, &key, stp);
if (action != PF_MATCH)
return (action);
if (pd2.dir == (*stp)->direction) {
if (PF_REVERSED_KEY((*stp)->key, pd->af)) {
src = &(*stp)->src;
dst = &(*stp)->dst;
} else {
src = &(*stp)->dst;
dst = &(*stp)->src;
}
} else {
if (PF_REVERSED_KEY((*stp)->key, pd->af)) {
src = &(*stp)->dst;
dst = &(*stp)->src;
} else {
src = &(*stp)->src;
dst = &(*stp)->dst;
}
}
if (src->wscale && dst->wscale)
dws = dst->wscale & PF_WSCALE_MASK;
else
dws = 0;
/* Demodulate sequence number */
seq = ntohl(th->th_seq) - src->seqdiff;
if (src->seqdiff) {
pf_patch_32(pd, &th->th_seq, htonl(seq));
copyback = 1;
}
if (!((*stp)->state_flags & PFSTATE_SLOPPY) &&
(!SEQ_GEQ(src->seqhi, seq) || !SEQ_GEQ(seq,
src->seqlo - (dst->max_win << dws)))) {
if (pf_status.debug >= LOG_NOTICE) {
log(LOG_NOTICE,
"pf: BAD ICMP %d:%d ",
icmptype, icmpcode);
pf_print_host(pd->src, 0, pd->af);
addlog(" -> ");
pf_print_host(pd->dst, 0, pd->af);
addlog(" state: ");
pf_print_state(*stp);
addlog(" seq=%u\n", seq);
}
REASON_SET(reason, PFRES_BADSTATE);
return (PF_DROP);
} else {
if (pf_status.debug >= LOG_DEBUG) {
log(LOG_DEBUG,
"pf: OK ICMP %d:%d ",
icmptype, icmpcode);
pf_print_host(pd->src, 0, pd->af);
addlog(" -> ");
pf_print_host(pd->dst, 0, pd->af);
addlog(" state: ");
pf_print_state(*stp);
addlog(" seq=%u\n", seq);
}
}
/* translate source/destination address, if necessary */
if ((*stp)->key[PF_SK_WIRE] !=
(*stp)->key[PF_SK_STACK]) {
struct pf_state_key *nk;
int afto, sidx, didx;
if (PF_REVERSED_KEY((*stp)->key, pd->af))
nk = (*stp)->key[pd->sidx];
else
nk = (*stp)->key[pd->didx];
afto = pd->af != nk->af;
sidx = afto ? pd2.didx : pd2.sidx;
didx = afto ? pd2.sidx : pd2.didx;
#ifdef INET6
if (afto) {
if (pf_translate_icmp_af(pd, nk->af,
&pd->hdr.icmp))
return (PF_DROP);
m_copyback(pd->m, pd->off,
sizeof(struct icmp6_hdr),
&pd->hdr.icmp6, M_NOWAIT);
if (pf_change_icmp_af(pd->m, ipoff2,
pd, &pd2, &nk->addr[sidx],
&nk->addr[didx], pd->af, nk->af))
return (PF_DROP);
if (nk->af == AF_INET)
pd->proto = IPPROTO_ICMP;
else
pd->proto = IPPROTO_ICMPV6;
pd->m->m_pkthdr.ph_rtableid =
nk->rdomain;
pd->destchg = 1;
pf_addrcpy(&pd->nsaddr,
&nk->addr[pd2.sidx], nk->af);
pf_addrcpy(&pd->ndaddr,
&nk->addr[pd2.didx], nk->af);
pd->naf = nk->af;
pf_patch_16(pd,
&th->th_sport, nk->port[sidx]);
pf_patch_16(pd,
&th->th_dport, nk->port[didx]);
m_copyback(pd2.m, pd2.off, 8, th,
M_NOWAIT);
return (PF_AFRT);
}
#endif /* INET6 */
if (PF_ANEQ(pd2.src,
&nk->addr[pd2.sidx], pd2.af) ||
nk->port[pd2.sidx] != th->th_sport)
pf_translate_icmp(pd, pd2.src,
&th->th_sport, pd->dst,
&nk->addr[pd2.sidx],
nk->port[pd2.sidx]);
if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
pd2.af) || pd2.rdomain != nk->rdomain)
pd->destchg = 1;
pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
if (PF_ANEQ(pd2.dst,
&nk->addr[pd2.didx], pd2.af) ||
nk->port[pd2.didx] != th->th_dport)
pf_translate_icmp(pd, pd2.dst,
&th->th_dport, pd->src,
&nk->addr[pd2.didx],
nk->port[pd2.didx]);
copyback = 1;
}
if (copyback) {
switch (pd2.af) {
case AF_INET:
m_copyback(pd->m, pd->off, ICMP_MINLEN,
&pd->hdr.icmp, M_NOWAIT);
m_copyback(pd2.m, ipoff2, sizeof(h2),
&h2, M_NOWAIT);
break;
#ifdef INET6
case AF_INET6:
m_copyback(pd->m, pd->off,
sizeof(struct icmp6_hdr),
&pd->hdr.icmp6, M_NOWAIT);
m_copyback(pd2.m, ipoff2, sizeof(h2_6),
&h2_6, M_NOWAIT);
break;
#endif /* INET6 */
}
m_copyback(pd2.m, pd2.off, 8, th, M_NOWAIT);
}
break;
}
case IPPROTO_UDP: {
struct udphdr *uh = &pd2.hdr.udp;
int action;
if (!pf_pull_hdr(pd2.m, pd2.off, uh, sizeof(*uh),
reason, pd2.af)) {
DPFPRINTF(LOG_NOTICE,
"ICMP error message too short (udp)");
return (PF_DROP);
}
key.af = pd2.af;
key.proto = IPPROTO_UDP;
key.rdomain = pd2.rdomain;
pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af);
key.port[pd2.sidx] = uh->uh_sport;
key.port[pd2.didx] = uh->uh_dport;
key.hash = pf_pkt_hash(pd2.af, pd2.proto,
pd2.src, pd2.dst, uh->uh_sport, uh->uh_dport);
action = pf_find_state(&pd2, &key, stp);
if (action != PF_MATCH)
return (action);
/* translate source/destination address, if necessary */
if ((*stp)->key[PF_SK_WIRE] !=
(*stp)->key[PF_SK_STACK]) {
struct pf_state_key *nk;
int afto, sidx, didx;
if (PF_REVERSED_KEY((*stp)->key, pd->af))
nk = (*stp)->key[pd->sidx];
else
nk = (*stp)->key[pd->didx];
afto = pd->af != nk->af;
sidx = afto ? pd2.didx : pd2.sidx;
didx = afto ? pd2.sidx : pd2.didx;
#ifdef INET6
if (afto) {
if (pf_translate_icmp_af(pd, nk->af,
&pd->hdr.icmp))
return (PF_DROP);
m_copyback(pd->m, pd->off,
sizeof(struct icmp6_hdr),
&pd->hdr.icmp6, M_NOWAIT);
if (pf_change_icmp_af(pd->m, ipoff2,
pd, &pd2, &nk->addr[sidx],
&nk->addr[didx], pd->af, nk->af))
return (PF_DROP);
if (nk->af == AF_INET)
pd->proto = IPPROTO_ICMP;
else
pd->proto = IPPROTO_ICMPV6;
pd->m->m_pkthdr.ph_rtableid =
nk->rdomain;
pd->destchg = 1;
pf_addrcpy(&pd->nsaddr,
&nk->addr[pd2.sidx], nk->af);
pf_addrcpy(&pd->ndaddr,
&nk->addr[pd2.didx], nk->af);
pd->naf = nk->af;
pf_patch_16(pd,
&uh->uh_sport, nk->port[sidx]);
pf_patch_16(pd,
&uh->uh_dport, nk->port[didx]);
m_copyback(pd2.m, pd2.off, sizeof(*uh),
uh, M_NOWAIT);
return (PF_AFRT);
}
#endif /* INET6 */
if (PF_ANEQ(pd2.src,
&nk->addr[pd2.sidx], pd2.af) ||
nk->port[pd2.sidx] != uh->uh_sport)
pf_translate_icmp(pd, pd2.src,
&uh->uh_sport, pd->dst,
&nk->addr[pd2.sidx],
nk->port[pd2.sidx]);
if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
pd2.af) || pd2.rdomain != nk->rdomain)
pd->destchg = 1;
pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
if (PF_ANEQ(pd2.dst,
&nk->addr[pd2.didx], pd2.af) ||
nk->port[pd2.didx] != uh->uh_dport)
pf_translate_icmp(pd, pd2.dst,
&uh->uh_dport, pd->src,
&nk->addr[pd2.didx],
nk->port[pd2.didx]);
switch (pd2.af) {
case AF_INET:
m_copyback(pd->m, pd->off, ICMP_MINLEN,
&pd->hdr.icmp, M_NOWAIT);
m_copyback(pd2.m, ipoff2, sizeof(h2),
&h2, M_NOWAIT);
break;
#ifdef INET6
case AF_INET6:
m_copyback(pd->m, pd->off,
sizeof(struct icmp6_hdr),
&pd->hdr.icmp6, M_NOWAIT);
m_copyback(pd2.m, ipoff2, sizeof(h2_6),
&h2_6, M_NOWAIT);
break;
#endif /* INET6 */
}
/* Avoid recomputing quoted UDP checksum.
* note: udp6 0 csum invalid per rfc2460 p27.
* but presumed nothing cares in this context */
pf_patch_16(pd, &uh->uh_sum, 0);
m_copyback(pd2.m, pd2.off, sizeof(*uh), uh,
M_NOWAIT);
copyback = 1;
}
break;
}
case IPPROTO_ICMP: {
struct icmp *iih = &pd2.hdr.icmp;
if (pd2.af != AF_INET) {
REASON_SET(reason, PFRES_NORM);
return (PF_DROP);
}
if (!pf_pull_hdr(pd2.m, pd2.off, iih, ICMP_MINLEN,
reason, pd2.af)) {
DPFPRINTF(LOG_NOTICE,
"ICMP error message too short (icmp)");
return (PF_DROP);
}
pf_icmp_mapping(&pd2, iih->icmp_type,
&icmp_dir, &virtual_id, &virtual_type);
ret = pf_icmp_state_lookup(&pd2, &key, stp,
virtual_id, virtual_type, icmp_dir, &iidx, 0, 1);
if (ret >= 0)
return (ret);
/* translate source/destination address, if necessary */
if ((*stp)->key[PF_SK_WIRE] !=
(*stp)->key[PF_SK_STACK]) {
struct pf_state_key *nk;
int afto, sidx, didx;
if (PF_REVERSED_KEY((*stp)->key, pd->af))
nk = (*stp)->key[pd->sidx];
else
nk = (*stp)->key[pd->didx];
afto = pd->af != nk->af;
sidx = afto ? pd2.didx : pd2.sidx;
didx = afto ? pd2.sidx : pd2.didx;
iidx = afto ? !iidx : iidx;
#ifdef INET6
if (afto) {
if (nk->af != AF_INET6)
return (PF_DROP);
if (pf_translate_icmp_af(pd, nk->af,
&pd->hdr.icmp))
return (PF_DROP);
m_copyback(pd->m, pd->off,
sizeof(struct icmp6_hdr),
&pd->hdr.icmp6, M_NOWAIT);
if (pf_change_icmp_af(pd->m, ipoff2,
pd, &pd2, &nk->addr[sidx],
&nk->addr[didx], pd->af, nk->af))
return (PF_DROP);
pd->proto = IPPROTO_ICMPV6;
if (pf_translate_icmp_af(pd,
nk->af, iih))
return (PF_DROP);
if (virtual_type == htons(ICMP_ECHO))
pf_patch_16(pd, &iih->icmp_id,
nk->port[iidx]);
m_copyback(pd2.m, pd2.off, ICMP_MINLEN,
iih, M_NOWAIT);
pd->m->m_pkthdr.ph_rtableid =
nk->rdomain;
pd->destchg = 1;
pf_addrcpy(&pd->nsaddr,
&nk->addr[pd2.sidx], nk->af);
pf_addrcpy(&pd->ndaddr,
&nk->addr[pd2.didx], nk->af);
pd->naf = nk->af;
return (PF_AFRT);
}
#endif /* INET6 */
if (PF_ANEQ(pd2.src,
&nk->addr[pd2.sidx], pd2.af) ||
(virtual_type == htons(ICMP_ECHO) &&
nk->port[iidx] != iih->icmp_id))
pf_translate_icmp(pd, pd2.src,
(virtual_type == htons(ICMP_ECHO)) ?
&iih->icmp_id : NULL,
pd->dst, &nk->addr[pd2.sidx],
(virtual_type == htons(ICMP_ECHO)) ?
nk->port[iidx] : 0);
if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
pd2.af) || pd2.rdomain != nk->rdomain)
pd->destchg = 1;
pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
if (PF_ANEQ(pd2.dst,
&nk->addr[pd2.didx], pd2.af))
pf_translate_icmp(pd, pd2.dst, NULL,
pd->src, &nk->addr[pd2.didx], 0);
m_copyback(pd->m, pd->off, ICMP_MINLEN,
&pd->hdr.icmp, M_NOWAIT);
m_copyback(pd2.m, ipoff2, sizeof(h2), &h2,
M_NOWAIT);
m_copyback(pd2.m, pd2.off, ICMP_MINLEN, iih,
M_NOWAIT);
copyback = 1;
}
break;
}
#ifdef INET6
case IPPROTO_ICMPV6: {
struct icmp6_hdr *iih = &pd2.hdr.icmp6;
if (pd2.af != AF_INET6) {
REASON_SET(reason, PFRES_NORM);
return (PF_DROP);
}
if (!pf_pull_hdr(pd2.m, pd2.off, iih,
sizeof(struct icmp6_hdr), reason, pd2.af)) {
DPFPRINTF(LOG_NOTICE,
"ICMP error message too short (icmp6)");
return (PF_DROP);
}
pf_icmp_mapping(&pd2, iih->icmp6_type,
&icmp_dir, &virtual_id, &virtual_type);
ret = pf_icmp_state_lookup(&pd2, &key, stp,
virtual_id, virtual_type, icmp_dir, &iidx, 0, 1);
/* IPv6? try matching a multicast address */
if (ret == PF_DROP && pd2.af == AF_INET6 &&
icmp_dir == PF_OUT)
ret = pf_icmp_state_lookup(&pd2, &key, stp,
virtual_id, virtual_type, icmp_dir, &iidx,
1, 1);
if (ret >= 0)
return (ret);
/* translate source/destination address, if necessary */
if ((*stp)->key[PF_SK_WIRE] !=
(*stp)->key[PF_SK_STACK]) {
struct pf_state_key *nk;
int afto, sidx, didx;
if (PF_REVERSED_KEY((*stp)->key, pd->af))
nk = (*stp)->key[pd->sidx];
else
nk = (*stp)->key[pd->didx];
afto = pd->af != nk->af;
sidx = afto ? pd2.didx : pd2.sidx;
didx = afto ? pd2.sidx : pd2.didx;
iidx = afto ? !iidx : iidx;
if (afto) {
if (nk->af != AF_INET)
return (PF_DROP);
if (pf_translate_icmp_af(pd, nk->af,
&pd->hdr.icmp))
return (PF_DROP);
m_copyback(pd->m, pd->off,
sizeof(struct icmp6_hdr),
&pd->hdr.icmp6, M_NOWAIT);
if (pf_change_icmp_af(pd->m, ipoff2,
pd, &pd2, &nk->addr[sidx],
&nk->addr[didx], pd->af, nk->af))
return (PF_DROP);
pd->proto = IPPROTO_ICMP;
if (pf_translate_icmp_af(pd,
nk->af, iih))
return (PF_DROP);
if (virtual_type ==
htons(ICMP6_ECHO_REQUEST))
pf_patch_16(pd, &iih->icmp6_id,
nk->port[iidx]);
m_copyback(pd2.m, pd2.off,
sizeof(struct icmp6_hdr), iih,
M_NOWAIT);
pd->m->m_pkthdr.ph_rtableid =
nk->rdomain;
pd->destchg = 1;
pf_addrcpy(&pd->nsaddr,
&nk->addr[pd2.sidx], nk->af);
pf_addrcpy(&pd->ndaddr,
&nk->addr[pd2.didx], nk->af);
pd->naf = nk->af;
return (PF_AFRT);
}
if (PF_ANEQ(pd2.src,
&nk->addr[pd2.sidx], pd2.af) ||
((virtual_type ==
htons(ICMP6_ECHO_REQUEST)) &&
nk->port[pd2.sidx] != iih->icmp6_id))
pf_translate_icmp(pd, pd2.src,
(virtual_type ==
htons(ICMP6_ECHO_REQUEST))
? &iih->icmp6_id : NULL,
pd->dst, &nk->addr[pd2.sidx],
(virtual_type ==
htons(ICMP6_ECHO_REQUEST))
? nk->port[iidx] : 0);
if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
pd2.af) || pd2.rdomain != nk->rdomain)
pd->destchg = 1;
pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
if (PF_ANEQ(pd2.dst,
&nk->addr[pd2.didx], pd2.af))
pf_translate_icmp(pd, pd2.dst, NULL,
pd->src, &nk->addr[pd2.didx], 0);
m_copyback(pd->m, pd->off,
sizeof(struct icmp6_hdr), &pd->hdr.icmp6,
M_NOWAIT);
m_copyback(pd2.m, ipoff2, sizeof(h2_6), &h2_6,
M_NOWAIT);
m_copyback(pd2.m, pd2.off,
sizeof(struct icmp6_hdr), iih, M_NOWAIT);
copyback = 1;
}
break;
}
#endif /* INET6 */
default: {
int action;
key.af = pd2.af;
key.proto = pd2.proto;
key.rdomain = pd2.rdomain;
pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af);
key.port[0] = key.port[1] = 0;
key.hash = pf_pkt_hash(pd2.af, pd2.proto,
pd2.src, pd2.dst, 0, 0);
action = pf_find_state(&pd2, &key, stp);
if (action != PF_MATCH)
return (action);
/* translate source/destination address, if necessary */
if ((*stp)->key[PF_SK_WIRE] !=
(*stp)->key[PF_SK_STACK]) {
struct pf_state_key *nk =
(*stp)->key[pd->didx];
if (PF_ANEQ(pd2.src,
&nk->addr[pd2.sidx], pd2.af))
pf_translate_icmp(pd, pd2.src, NULL,
pd->dst, &nk->addr[pd2.sidx], 0);
if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
pd2.af) || pd2.rdomain != nk->rdomain)
pd->destchg = 1;
pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
if (PF_ANEQ(pd2.dst,
&nk->addr[pd2.didx], pd2.af))
pf_translate_icmp(pd, pd2.dst, NULL,
pd->src, &nk->addr[pd2.didx], 0);
switch (pd2.af) {
case AF_INET:
m_copyback(pd->m, pd->off, ICMP_MINLEN,
&pd->hdr.icmp, M_NOWAIT);
m_copyback(pd2.m, ipoff2, sizeof(h2),
&h2, M_NOWAIT);
break;
#ifdef INET6
case AF_INET6:
m_copyback(pd->m, pd->off,
sizeof(struct icmp6_hdr),
&pd->hdr.icmp6, M_NOWAIT);
m_copyback(pd2.m, ipoff2, sizeof(h2_6),
&h2_6, M_NOWAIT);
break;
#endif /* INET6 */
}
copyback = 1;
}
break;
}
}
}
if (copyback) {
m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT);
}
return (PF_PASS);
}
|
37
| 0
|
TIFFPrintDirectory(TIFF* tif, FILE* fd, long flags)
{
TIFFDirectory *td = &tif->tif_dir;
char *sep;
long l, n;
#if defined(__WIN32__) && (defined(_MSC_VER) || defined(__MINGW32__))
fprintf(fd, "TIFF Directory at offset 0x%I64x (%I64u)\n",
(unsigned __int64) tif->tif_diroff,
(unsigned __int64) tif->tif_diroff);
#else
fprintf(fd, "TIFF Directory at offset 0x%llx (%llu)\n",
(unsigned long long) tif->tif_diroff,
(unsigned long long) tif->tif_diroff);
#endif
if (TIFFFieldSet(tif,FIELD_SUBFILETYPE)) {
fprintf(fd, " Subfile Type:");
sep = " ";
if (td->td_subfiletype & FILETYPE_REDUCEDIMAGE) {
fprintf(fd, "%sreduced-resolution image", sep);
sep = "/";
}
if (td->td_subfiletype & FILETYPE_PAGE) {
fprintf(fd, "%smulti-page document", sep);
sep = "/";
}
if (td->td_subfiletype & FILETYPE_MASK)
fprintf(fd, "%stransparency mask", sep);
fprintf(fd, " (%lu = 0x%lx)\n",
(unsigned long) td->td_subfiletype, (long) td->td_subfiletype);
}
if (TIFFFieldSet(tif,FIELD_IMAGEDIMENSIONS)) {
fprintf(fd, " Image Width: %lu Image Length: %lu",
(unsigned long) td->td_imagewidth, (unsigned long) td->td_imagelength);
if (TIFFFieldSet(tif,FIELD_IMAGEDEPTH))
fprintf(fd, " Image Depth: %lu",
(unsigned long) td->td_imagedepth);
fprintf(fd, "\n");
}
if (TIFFFieldSet(tif,FIELD_TILEDIMENSIONS)) {
fprintf(fd, " Tile Width: %lu Tile Length: %lu",
(unsigned long) td->td_tilewidth, (unsigned long) td->td_tilelength);
if (TIFFFieldSet(tif,FIELD_TILEDEPTH))
fprintf(fd, " Tile Depth: %lu",
(unsigned long) td->td_tiledepth);
fprintf(fd, "\n");
}
if (TIFFFieldSet(tif,FIELD_RESOLUTION)) {
fprintf(fd, " Resolution: %g, %g",
td->td_xresolution, td->td_yresolution);
if (TIFFFieldSet(tif,FIELD_RESOLUTIONUNIT)) {
switch (td->td_resolutionunit) {
case RESUNIT_NONE:
fprintf(fd, " (unitless)");
break;
case RESUNIT_INCH:
fprintf(fd, " pixels/inch");
break;
case RESUNIT_CENTIMETER:
fprintf(fd, " pixels/cm");
break;
default:
fprintf(fd, " (unit %u = 0x%x)",
td->td_resolutionunit,
td->td_resolutionunit);
break;
}
}
fprintf(fd, "\n");
}
if (TIFFFieldSet(tif,FIELD_POSITION))
fprintf(fd, " Position: %g, %g\n",
td->td_xposition, td->td_yposition);
if (TIFFFieldSet(tif,FIELD_BITSPERSAMPLE))
fprintf(fd, " Bits/Sample: %u\n", td->td_bitspersample);
if (TIFFFieldSet(tif,FIELD_SAMPLEFORMAT)) {
fprintf(fd, " Sample Format: ");
switch (td->td_sampleformat) {
case SAMPLEFORMAT_VOID:
fprintf(fd, "void\n");
break;
case SAMPLEFORMAT_INT:
fprintf(fd, "signed integer\n");
break;
case SAMPLEFORMAT_UINT:
fprintf(fd, "unsigned integer\n");
break;
case SAMPLEFORMAT_IEEEFP:
fprintf(fd, "IEEE floating point\n");
break;
case SAMPLEFORMAT_COMPLEXINT:
fprintf(fd, "complex signed integer\n");
break;
case SAMPLEFORMAT_COMPLEXIEEEFP:
fprintf(fd, "complex IEEE floating point\n");
break;
default:
fprintf(fd, "%u (0x%x)\n",
td->td_sampleformat, td->td_sampleformat);
break;
}
}
if (TIFFFieldSet(tif,FIELD_COMPRESSION)) {
const TIFFCodec* c = TIFFFindCODEC(td->td_compression);
fprintf(fd, " Compression Scheme: ");
if (c)
fprintf(fd, "%s\n", c->name);
else
fprintf(fd, "%u (0x%x)\n",
td->td_compression, td->td_compression);
}
if (TIFFFieldSet(tif,FIELD_PHOTOMETRIC)) {
fprintf(fd, " Photometric Interpretation: ");
if (td->td_photometric < NPHOTONAMES)
fprintf(fd, "%s\n", photoNames[td->td_photometric]);
else {
switch (td->td_photometric) {
case PHOTOMETRIC_LOGL:
fprintf(fd, "CIE Log2(L)\n");
break;
case PHOTOMETRIC_LOGLUV:
fprintf(fd, "CIE Log2(L) (u',v')\n");
break;
default:
fprintf(fd, "%u (0x%x)\n",
td->td_photometric, td->td_photometric);
break;
}
}
}
if (TIFFFieldSet(tif,FIELD_EXTRASAMPLES) && td->td_extrasamples) {
uint16 i;
fprintf(fd, " Extra Samples: %u<", td->td_extrasamples);
sep = "";
for (i = 0; i < td->td_extrasamples; i++) {
switch (td->td_sampleinfo[i]) {
case EXTRASAMPLE_UNSPECIFIED:
fprintf(fd, "%sunspecified", sep);
break;
case EXTRASAMPLE_ASSOCALPHA:
fprintf(fd, "%sassoc-alpha", sep);
break;
case EXTRASAMPLE_UNASSALPHA:
fprintf(fd, "%sunassoc-alpha", sep);
break;
default:
fprintf(fd, "%s%u (0x%x)", sep,
td->td_sampleinfo[i], td->td_sampleinfo[i]);
break;
}
sep = ", ";
}
fprintf(fd, ">\n");
}
if (TIFFFieldSet(tif,FIELD_INKNAMES)) {
char* cp;
uint16 i;
fprintf(fd, " Ink Names: ");
i = td->td_samplesperpixel;
sep = "";
for (cp = td->td_inknames;
i > 0 && cp < td->td_inknames + td->td_inknameslen;
cp = strchr(cp,'\0')+1, i--) {
size_t max_chars =
td->td_inknameslen - (cp - td->td_inknames);
fputs(sep, fd);
_TIFFprintAsciiBounded(fd, cp, max_chars);
sep = ", ";
}
fputs("\n", fd);
}
if (TIFFFieldSet(tif,FIELD_THRESHHOLDING)) {
fprintf(fd, " Thresholding: ");
switch (td->td_threshholding) {
case THRESHHOLD_BILEVEL:
fprintf(fd, "bilevel art scan\n");
break;
case THRESHHOLD_HALFTONE:
fprintf(fd, "halftone or dithered scan\n");
break;
case THRESHHOLD_ERRORDIFFUSE:
fprintf(fd, "error diffused\n");
break;
default:
fprintf(fd, "%u (0x%x)\n",
td->td_threshholding, td->td_threshholding);
break;
}
}
if (TIFFFieldSet(tif,FIELD_FILLORDER)) {
fprintf(fd, " FillOrder: ");
switch (td->td_fillorder) {
case FILLORDER_MSB2LSB:
fprintf(fd, "msb-to-lsb\n");
break;
case FILLORDER_LSB2MSB:
fprintf(fd, "lsb-to-msb\n");
break;
default:
fprintf(fd, "%u (0x%x)\n",
td->td_fillorder, td->td_fillorder);
break;
}
}
if (TIFFFieldSet(tif,FIELD_YCBCRSUBSAMPLING))
{
fprintf(fd, " YCbCr Subsampling: %u, %u\n",
td->td_ycbcrsubsampling[0], td->td_ycbcrsubsampling[1] );
}
if (TIFFFieldSet(tif,FIELD_YCBCRPOSITIONING)) {
fprintf(fd, " YCbCr Positioning: ");
switch (td->td_ycbcrpositioning) {
case YCBCRPOSITION_CENTERED:
fprintf(fd, "centered\n");
break;
case YCBCRPOSITION_COSITED:
fprintf(fd, "cosited\n");
break;
default:
fprintf(fd, "%u (0x%x)\n",
td->td_ycbcrpositioning, td->td_ycbcrpositioning);
break;
}
}
if (TIFFFieldSet(tif,FIELD_HALFTONEHINTS))
fprintf(fd, " Halftone Hints: light %u dark %u\n",
td->td_halftonehints[0], td->td_halftonehints[1]);
if (TIFFFieldSet(tif,FIELD_ORIENTATION)) {
fprintf(fd, " Orientation: ");
if (td->td_orientation < NORIENTNAMES)
fprintf(fd, "%s\n", orientNames[td->td_orientation]);
else
fprintf(fd, "%u (0x%x)\n",
td->td_orientation, td->td_orientation);
}
if (TIFFFieldSet(tif,FIELD_SAMPLESPERPIXEL))
fprintf(fd, " Samples/Pixel: %u\n", td->td_samplesperpixel);
if (TIFFFieldSet(tif,FIELD_ROWSPERSTRIP)) {
fprintf(fd, " Rows/Strip: ");
if (td->td_rowsperstrip == (uint32) -1)
fprintf(fd, "(infinite)\n");
else
fprintf(fd, "%lu\n", (unsigned long) td->td_rowsperstrip);
}
if (TIFFFieldSet(tif,FIELD_MINSAMPLEVALUE))
fprintf(fd, " Min Sample Value: %u\n", td->td_minsamplevalue);
if (TIFFFieldSet(tif,FIELD_MAXSAMPLEVALUE))
fprintf(fd, " Max Sample Value: %u\n", td->td_maxsamplevalue);
if (TIFFFieldSet(tif,FIELD_SMINSAMPLEVALUE)) {
int i;
int count = (tif->tif_flags & TIFF_PERSAMPLE) ? td->td_samplesperpixel : 1;
fprintf(fd, " SMin Sample Value:");
for (i = 0; i < count; ++i)
fprintf(fd, " %g", td->td_sminsamplevalue[i]);
fprintf(fd, "\n");
}
if (TIFFFieldSet(tif,FIELD_SMAXSAMPLEVALUE)) {
int i;
int count = (tif->tif_flags & TIFF_PERSAMPLE) ? td->td_samplesperpixel : 1;
fprintf(fd, " SMax Sample Value:");
for (i = 0; i < count; ++i)
fprintf(fd, " %g", td->td_smaxsamplevalue[i]);
fprintf(fd, "\n");
}
if (TIFFFieldSet(tif,FIELD_PLANARCONFIG)) {
fprintf(fd, " Planar Configuration: ");
switch (td->td_planarconfig) {
case PLANARCONFIG_CONTIG:
fprintf(fd, "single image plane\n");
break;
case PLANARCONFIG_SEPARATE:
fprintf(fd, "separate image planes\n");
break;
default:
fprintf(fd, "%u (0x%x)\n",
td->td_planarconfig, td->td_planarconfig);
break;
}
}
if (TIFFFieldSet(tif,FIELD_PAGENUMBER))
fprintf(fd, " Page Number: %u-%u\n",
td->td_pagenumber[0], td->td_pagenumber[1]);
if (TIFFFieldSet(tif,FIELD_COLORMAP)) {
fprintf(fd, " Color Map: ");
if (flags & TIFFPRINT_COLORMAP) {
fprintf(fd, "\n");
n = 1L<<td->td_bitspersample;
for (l = 0; l < n; l++)
fprintf(fd, " %5ld: %5u %5u %5u\n",
l,
td->td_colormap[0][l],
td->td_colormap[1][l],
td->td_colormap[2][l]);
} else
fprintf(fd, "(present)\n");
}
if (TIFFFieldSet(tif,FIELD_REFBLACKWHITE)) {
int i;
fprintf(fd, " Reference Black/White:\n");
for (i = 0; i < 3; i++)
fprintf(fd, " %2d: %5g %5g\n", i,
td->td_refblackwhite[2*i+0],
td->td_refblackwhite[2*i+1]);
}
if (TIFFFieldSet(tif,FIELD_TRANSFERFUNCTION)) {
fprintf(fd, " Transfer Function: ");
if (flags & TIFFPRINT_CURVES) {
fprintf(fd, "\n");
n = 1L<<td->td_bitspersample;
for (l = 0; l < n; l++) {
uint16 i;
fprintf(fd, " %2ld: %5u",
l, td->td_transferfunction[0][l]);
for (i = 1; i < td->td_samplesperpixel; i++)
fprintf(fd, " %5u",
td->td_transferfunction[i][l]);
fputc('\n', fd);
}
} else
fprintf(fd, "(present)\n");
}
if (TIFFFieldSet(tif, FIELD_SUBIFD) && (td->td_subifd)) {
uint16 i;
fprintf(fd, " SubIFD Offsets:");
for (i = 0; i < td->td_nsubifd; i++)
#if defined(__WIN32__) && (defined(_MSC_VER) || defined(__MINGW32__))
fprintf(fd, " %5I64u",
(unsigned __int64) td->td_subifd[i]);
#else
fprintf(fd, " %5llu",
(unsigned long long) td->td_subifd[i]);
#endif
fputc('\n', fd);
}
/*
** Custom tag support.
*/
{
int i;
short count;
count = (short) TIFFGetTagListCount(tif);
for(i = 0; i < count; i++) {
uint32 tag = TIFFGetTagListEntry(tif, i);
const TIFFField *fip;
uint32 value_count;
int mem_alloc = 0;
void *raw_data;
fip = TIFFFieldWithTag(tif, tag);
if(fip == NULL)
continue;
if(fip->field_passcount) {
if (fip->field_readcount == TIFF_VARIABLE2 ) {
if(TIFFGetField(tif, tag, &value_count, &raw_data) != 1)
continue;
} else if (fip->field_readcount == TIFF_VARIABLE ) {
uint16 small_value_count;
if(TIFFGetField(tif, tag, &small_value_count, &raw_data) != 1)
continue;
value_count = small_value_count;
} else {
assert (fip->field_readcount == TIFF_VARIABLE
|| fip->field_readcount == TIFF_VARIABLE2);
continue;
}
} else {
if (fip->field_readcount == TIFF_VARIABLE
|| fip->field_readcount == TIFF_VARIABLE2)
value_count = 1;
else if (fip->field_readcount == TIFF_SPP)
value_count = td->td_samplesperpixel;
else
value_count = fip->field_readcount;
if (fip->field_tag == TIFFTAG_DOTRANGE
&& strcmp(fip->field_name,"DotRange") == 0) {
/* TODO: This is an evil exception and should not have been
handled this way ... likely best if we move it into
the directory structure with an explicit field in
libtiff 4.1 and assign it a FIELD_ value */
static uint16 dotrange[2];
raw_data = dotrange;
TIFFGetField(tif, tag, dotrange+0, dotrange+1);
} else if (fip->field_type == TIFF_ASCII
|| fip->field_readcount == TIFF_VARIABLE
|| fip->field_readcount == TIFF_VARIABLE2
|| fip->field_readcount == TIFF_SPP
|| value_count > 1) {
if(TIFFGetField(tif, tag, &raw_data) != 1)
continue;
} else {
raw_data = _TIFFmalloc(
_TIFFDataSize(fip->field_type)
* value_count);
mem_alloc = 1;
if(TIFFGetField(tif, tag, raw_data) != 1) {
_TIFFfree(raw_data);
continue;
}
}
}
/*
* Catch the tags which need to be specially handled
* and pretty-print them. If a tag is not handled in
* _TIFFPrettyPrintField(), fall through and print it as
* any other tag.
*/
if (!_TIFFPrettyPrintField(tif, fip, fd, tag, value_count, raw_data))
_TIFFPrintField(fd, fip, value_count, raw_data);
if(mem_alloc)
_TIFFfree(raw_data);
}
}
if (tif->tif_tagmethods.printdir)
(*tif->tif_tagmethods.printdir)(tif, fd, flags);
_TIFFFillStriles( tif );
if ((flags & TIFFPRINT_STRIPS) &&
TIFFFieldSet(tif,FIELD_STRIPOFFSETS)) {
uint32 s;
fprintf(fd, " %lu %s:\n",
(unsigned long) td->td_nstrips,
isTiled(tif) ? "Tiles" : "Strips");
for (s = 0; s < td->td_nstrips; s++)
#if defined(__WIN32__) && (defined(_MSC_VER) || defined(__MINGW32__))
fprintf(fd, " %3lu: [%8I64u, %8I64u]\n",
(unsigned long) s,
td->td_stripoffset ? (unsigned __int64) td->td_stripoffset[s] : 0,
td->td_stripbytecount ? (unsigned __int64) td->td_stripbytecount[s] : 0);
#else
fprintf(fd, " %3lu: [%8llu, %8llu]\n",
(unsigned long) s,
td->td_stripoffset ? (unsigned long long) td->td_stripoffset[s] : 0,
td->td_stripbytecount ? (unsigned long long) td->td_stripbytecount[s] : 0);
#endif
}
}
|
38
| 1
|
PHPAPI char *php_escape_html_entities_ex(unsigned char *old, size_t oldlen, size_t *newlen, int all, int flags, char *hint_charset, zend_bool double_encode TSRMLS_DC)
{
size_t cursor, maxlen, len;
char *replaced;
enum entity_charset charset = determine_charset(hint_charset TSRMLS_CC);
int doctype = flags & ENT_HTML_DOC_TYPE_MASK;
entity_table_opt entity_table;
const enc_to_uni *to_uni_table = NULL;
const entity_ht *inv_map = NULL; /* used for !double_encode */
/* only used if flags includes ENT_HTML_IGNORE_ERRORS or ENT_HTML_SUBSTITUTE_DISALLOWED_CHARS */
const unsigned char *replacement = NULL;
size_t replacement_len = 0;
if (all) { /* replace with all named entities */
if (CHARSET_PARTIAL_SUPPORT(charset)) {
php_error_docref0(NULL TSRMLS_CC, E_STRICT, "Only basic entities "
"substitution is supported for multi-byte encodings other than UTF-8; "
"functionality is equivalent to htmlspecialchars");
}
LIMIT_ALL(all, doctype, charset);
}
entity_table = determine_entity_table(all, doctype);
if (all && !CHARSET_UNICODE_COMPAT(charset)) {
to_uni_table = enc_to_uni_index[charset];
}
if (!double_encode) {
/* first arg is 1 because we want to identify valid named entities
* even if we are only encoding the basic ones */
inv_map = unescape_inverse_map(1, flags);
}
if (flags & (ENT_HTML_SUBSTITUTE_ERRORS | ENT_HTML_SUBSTITUTE_DISALLOWED_CHARS)) {
if (charset == cs_utf_8) {
replacement = (const unsigned char*)"\xEF\xBF\xBD";
replacement_len = sizeof("\xEF\xBF\xBD") - 1;
} else {
replacement = (const unsigned char*)"&#xFFFD;";
replacement_len = sizeof("&#xFFFD;") - 1;
}
}
/* initial estimate */
if (oldlen < 64) {
maxlen = 128;
} else {
maxlen = 2 * oldlen;
if (maxlen < oldlen) {
zend_error_noreturn(E_ERROR, "Input string is too long");
return NULL;
}
}
replaced = emalloc(maxlen + 1); /* adding 1 is safe: maxlen is even */
len = 0;
cursor = 0;
while (cursor < oldlen) {
const unsigned char *mbsequence = NULL;
size_t mbseqlen = 0,
cursor_before = cursor;
int status = SUCCESS;
unsigned int this_char = get_next_char(charset, old, oldlen, &cursor, &status);
/* guarantee we have at least 40 bytes to write.
* In HTML5, entities may take up to 33 bytes */
if (len > maxlen - 40) { /* maxlen can never be smaller than 128 */
replaced = safe_erealloc(replaced, maxlen , 1, 128 + 1);
maxlen += 128;
}
if (status == FAILURE) {
/* invalid MB sequence */
if (flags & ENT_HTML_IGNORE_ERRORS) {
continue;
} else if (flags & ENT_HTML_SUBSTITUTE_ERRORS) {
memcpy(&replaced[len], replacement, replacement_len);
len += replacement_len;
continue;
} else {
efree(replaced);
*newlen = 0;
return STR_EMPTY_ALLOC();
}
} else { /* SUCCESS */
mbsequence = &old[cursor_before];
mbseqlen = cursor - cursor_before;
}
if (this_char != '&') { /* no entity on this position */
const unsigned char *rep = NULL;
size_t rep_len = 0;
if (((this_char == '\'' && !(flags & ENT_HTML_QUOTE_SINGLE)) ||
(this_char == '"' && !(flags & ENT_HTML_QUOTE_DOUBLE))))
goto pass_char_through;
if (all) { /* false that CHARSET_PARTIAL_SUPPORT(charset) */
if (to_uni_table != NULL) {
/* !CHARSET_UNICODE_COMPAT therefore not UTF-8; since UTF-8
* is the only multibyte encoding with !CHARSET_PARTIAL_SUPPORT,
* we're using a single byte encoding */
map_to_unicode(this_char, to_uni_table, &this_char);
if (this_char == 0xFFFF) /* no mapping; pass through */
goto pass_char_through;
}
/* the cursor may advance */
find_entity_for_char(this_char, charset, entity_table.ms_table, &rep,
&rep_len, old, oldlen, &cursor);
} else {
find_entity_for_char_basic(this_char, entity_table.table, &rep, &rep_len);
}
if (rep != NULL) {
replaced[len++] = '&';
memcpy(&replaced[len], rep, rep_len);
len += rep_len;
replaced[len++] = ';';
} else {
/* we did not find an entity for this char.
* check for its validity, if it's valid pass it unchanged */
if (flags & ENT_HTML_SUBSTITUTE_DISALLOWED_CHARS) {
if (CHARSET_UNICODE_COMPAT(charset)) {
if (!unicode_cp_is_allowed(this_char, doctype)) {
mbsequence = replacement;
mbseqlen = replacement_len;
}
} else if (to_uni_table) {
if (!all) /* otherwise we already did this */
map_to_unicode(this_char, to_uni_table, &this_char);
if (!unicode_cp_is_allowed(this_char, doctype)) {
mbsequence = replacement;
mbseqlen = replacement_len;
}
} else {
/* not a unicode code point, unless, coincidentally, it's in
* the 0x20..0x7D range (except 0x5C in sjis). We know nothing
* about other code points, because we have no tables. Since
* Unicode code points in that range are not disallowed in any
* document type, we could do nothing. However, conversion
* tables frequently map 0x00-0x1F to the respective C0 code
* points. Let's play it safe and admit that's the case */
if (this_char <= 0x7D &&
!unicode_cp_is_allowed(this_char, doctype)) {
mbsequence = replacement;
mbseqlen = replacement_len;
}
}
}
pass_char_through:
if (mbseqlen > 1) {
memcpy(replaced + len, mbsequence, mbseqlen);
len += mbseqlen;
} else {
replaced[len++] = mbsequence[0];
}
}
} else { /* this_char == '&' */
if (double_encode) {
encode_amp:
memcpy(&replaced[len], "&", sizeof("&") - 1);
len += sizeof("&") - 1;
} else { /* no double encode */
/* check if entity is valid */
size_t ent_len; /* not counting & or ; */
/* peek at next char */
if (old[cursor] == '#') { /* numeric entity */
unsigned code_point;
int valid;
char *pos = (char*)&old[cursor+1];
valid = process_numeric_entity((const char **)&pos, &code_point);
if (valid == FAILURE)
goto encode_amp;
if (flags & ENT_HTML_SUBSTITUTE_DISALLOWED_CHARS) {
if (!numeric_entity_is_allowed(code_point, doctype))
goto encode_amp;
}
ent_len = pos - (char*)&old[cursor];
} else { /* named entity */
/* check for validity of named entity */
const char *start = &old[cursor],
*next = start;
unsigned dummy1, dummy2;
if (process_named_entity_html(&next, &start, &ent_len) == FAILURE)
goto encode_amp;
if (resolve_named_entity_html(start, ent_len, inv_map, &dummy1, &dummy2) == FAILURE) {
if (!(doctype == ENT_HTML_DOC_XHTML && ent_len == 4 && start[0] == 'a'
&& start[1] == 'p' && start[2] == 'o' && start[3] == 's')) {
/* uses html4 inv_map, which doesn't include apos;. This is a
* hack to support it */
goto encode_amp;
}
}
}
/* checks passed; copy entity to result */
/* entity size is unbounded, we may need more memory */
/* at this point maxlen - len >= 40 */
if (maxlen - len < ent_len + 2 /* & and ; */) {
/* ent_len < oldlen, which is certainly <= SIZE_MAX/2 */
replaced = safe_erealloc(replaced, maxlen, 1, ent_len + 128 + 1);
maxlen += ent_len + 128;
}
replaced[len++] = '&';
memcpy(&replaced[len], &old[cursor], ent_len);
len += ent_len;
replaced[len++] = ';';
cursor += ent_len + 1;
}
}
}
replaced[len] = '\0';
*newlen = len;
return replaced;
}
|
39
| 0
|
void CoreUserInputHandler::handleSay(const BufferInfo &bufferInfo, const QString &msg)
{
if (bufferInfo.bufferName().isEmpty() || !bufferInfo.acceptsRegularMessages())
return; // server buffer
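// Outgoing text is encoded with the encoding configured for the target channel/buffer.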
std::function<QByteArray(const QString &, const QString &)> encodeFunc = [this] (const QString &target, const QString &message) -> QByteArray {
return channelEncode(target, message);
};
#ifdef HAVE_QCA2
putPrivmsg(bufferInfo.bufferName(), msg, encodeFunc, network()->cipher(bufferInfo.bufferName()));
#else
putPrivmsg(bufferInfo.bufferName(), msg, encodeFunc);
#endif
emit displayMsg(Message::Plain, bufferInfo.type(), bufferInfo.bufferName(), msg, network()->myNick(), Message::Self);
}
|
40
| 0
|
void _netplan_g_string_free_to_file_with_permissions(GString* s, const char* rootdir, const char* path, const char* suffix, const char* owner, const char* group, mode_t mode)
{
g_autofree char* full_path = NULL;
g_autofree char* path_suffix = NULL;
g_autofree char* contents = g_string_free(s, FALSE);
GError* error = NULL;
struct passwd* pw = NULL;
struct group* gr = NULL;
int ret = 0;
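/* Build "<rootdir>/<path><suffix>"; a NULL rootdir falls back to "/". */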
path_suffix = g_strjoin(NULL, path, suffix, NULL);
full_path = g_build_path(G_DIR_SEPARATOR_S, rootdir ?: G_DIR_SEPARATOR_S, path_suffix, NULL);
_netplan_safe_mkdir_p_dir(full_path);
if (!g_file_set_contents_full(full_path, contents, -1, G_FILE_SET_CONTENTS_CONSISTENT | G_FILE_SET_CONTENTS_ONLY_EXISTING, mode, &error)) {
/* the mkdir() just succeeded, there is no sensible
* method to test this without root privileges, bind mounts, and
* simulating ENOSPC */
// LCOV_EXCL_START
g_fprintf(stderr, "ERROR: cannot create file %s: %s\n", path, error->message);
exit(1);
// LCOV_EXCL_STOP
}
/* Here we take the owner and group names and look up their IDs in the passwd and group files.
* It's OK to fail to set the owners and mode as this code will be called from unit tests.
* The autopkgtests will check if the owner/group and mode are correctly set.
*/
pw = getpwnam(owner);
if (!pw) {
g_debug("Failed to determine the UID of user %s: %s", owner, strerror(errno)); // LCOV_EXCL_LINE
}
gr = getgrnam(group);
if (!gr) {
g_debug("Failed to determine the GID of group %s: %s", group, strerror(errno)); // LCOV_EXCL_LINE
}
if (pw && gr) {
ret = chown(full_path, pw->pw_uid, gr->gr_gid);
if (ret != 0) {
g_debug("Failed to set owner and group for file %s: %s", full_path, strerror(errno));
}
}
}
|
41
| 1
|
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
struct hstate *h = hstate_inode(inode);
long chg = region_truncate(&inode->i_mapping->private_list, offset);
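/* chg: reserved pages removed from the file's reserve map at or past 'offset' */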
spin_lock(&inode->i_lock);
inode->i_blocks -= (blocks_per_huge_page(h) * freed);
spin_unlock(&inode->i_lock);
hugetlb_put_quota(inode->i_mapping, (chg - freed));
hugetlb_acct_memory(h, -(chg - freed));
}
|
42
| 0
|
std::vector<std::string> Utility::getSubjectAltNames(X509& cert, int type, bool skip_unsupported) {
std::vector<std::string> subject_alt_names;
bssl::UniquePtr<GENERAL_NAMES> san_names(
static_cast<GENERAL_NAMES*>(X509_get_ext_d2i(&cert, NID_subject_alt_name, nullptr, nullptr)));
if (san_names == nullptr) {
return subject_alt_names;
}
for (const GENERAL_NAME* san : san_names.get()) {
if (san->type == type) {
if (skip_unsupported) {
// An IP SAN for an unsupported IP version will throw an exception.
// TODO(ggreenway): remove this when IP address construction no longer throws.
TRY_NEEDS_AUDIT_ADDRESS { subject_alt_names.push_back(generalNameAsString(san)); }
END_TRY CATCH(const EnvoyException& e,
{ ENVOY_LOG_MISC(debug, "Error reading SAN, value skipped: {}", e.what()); });
} else {
subject_alt_names.push_back(generalNameAsString(san));
}
}
}
return subject_alt_names;
}
|
43
| 1
|
TfLiteStatus UseDynamicOutputTensors(TfLiteContext* context, TfLiteNode* node) {
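// Mark every output tensor as dynamic so its buffer is (re)allocated at Eval time.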
for (int i = 0; i < NumOutputs(node); ++i) {
SetTensorToDynamic(GetOutput(context, node, i));
}
return kTfLiteOk;
}
|
44
| 0
|
processBatchMultiRuleset(batch_t *pBatch)
{
ruleset_t *currRuleset;
batch_t snglRuleBatch;
int i;
int iStart; /* start index of partial batch */
int iNew; /* index for new (temporary) batch */
int bHaveUnprocessed; /* do we (still) have unprocessed entries? (loop term predicate) */
DEFiRet;
do {
bHaveUnprocessed = 0;
/* search for first unprocessed element */
for(iStart = 0 ; iStart < pBatch->nElem && pBatch->pElem[iStart].state == BATCH_STATE_DISC ; ++iStart)
/* just search, no action */;
if(iStart == pBatch->nElem)
break; /* everything processed */
/* prepare temporary batch */
CHKiRet(batchInit(&snglRuleBatch, pBatch->nElem));
snglRuleBatch.pbShutdownImmediate = pBatch->pbShutdownImmediate;
currRuleset = batchElemGetRuleset(pBatch, iStart);
iNew = 0;
for(i = iStart ; i < pBatch->nElem ; ++i) {
if(batchElemGetRuleset(pBatch, i) == currRuleset) {
/* for performance reasons, we copy only those members that we actually need */
snglRuleBatch.pElem[iNew].pUsrp = pBatch->pElem[i].pUsrp;
snglRuleBatch.pElem[iNew].state = pBatch->pElem[i].state;
++iNew;
/* We also mark the element as done, so it will not be processed again */
pBatch->pElem[i].state = BATCH_STATE_DISC;
} else {
bHaveUnprocessed = 1;
}
}
snglRuleBatch.nElem = iNew; /* was left just right by the for loop */
batchSetSingleRuleset(&snglRuleBatch, 1);
/* process temp batch */
processBatch(&snglRuleBatch);
batchFree(&snglRuleBatch);
} while(bHaveUnprocessed == 1);
finalize_it:
RETiRet;
}
|
45
| 0
|
ParticipantGenericMessage(const ParticipantGenericMessage& message) :
message_identity_(message.message_identity_),
related_message_identity_(message.related_message_identity_),
destination_participant_key_(message.destination_participant_key_),
destination_endpoint_key_(message.destination_endpoint_key_),
source_endpoint_key_(message.source_endpoint_key_),
message_class_id_(message.message_class_id_),
message_data_(message.message_data_)
{}
|
46
| 0
|
TfLiteStatus CheckInputTensorDimensions(TfLiteContext* context,
TfLiteNode* node, int n_input,
int n_output, int n_cell,
bool is_layer_norm_lstm) {
const auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
// Making sure clipping parameters have valid values.
// == 0 means no clipping
// > 0 means clipping
TF_LITE_ENSURE(context, params->cell_clip >= 0);
TF_LITE_ENSURE(context, params->proj_clip >= 0);
const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kInputToInputWeightsTensor);
if (input_to_input_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_input_weights->dims->data[1], n_input);
}
const TfLiteTensor* input_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kInputToForgetWeightsTensor,
&input_to_forget_weights));
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_forget_weights->dims->data[1], n_input);
const TfLiteTensor* input_to_cell_weights;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node,
lstm::full::kInputToCellWeightsTensor,
&input_to_cell_weights));
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, input_to_cell_weights->dims->data[1], n_input);
const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kRecurrentToInputWeightsTensor);
if (recurrent_to_input_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[0],
n_cell);
TF_LITE_ENSURE_EQ(context, recurrent_to_input_weights->dims->data[1],
n_output);
}
const TfLiteTensor* recurrent_to_forget_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToForgetWeightsTensor,
&recurrent_to_forget_weights));
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[0],
n_cell);
TF_LITE_ENSURE_EQ(context, recurrent_to_forget_weights->dims->data[1],
n_output);
const TfLiteTensor* recurrent_to_cell_weights;
TF_LITE_ENSURE_OK(
context,
GetInputSafe(context, node, lstm::full::kRecurrentToCellWeightsTensor,
&recurrent_to_cell_weights));
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[0], n_cell);
TF_LITE_ENSURE_EQ(context, recurrent_to_cell_weights->dims->data[1],
n_output);
// We make sure the input-gate's parameters are either both present (regular
// LSTM) or not at all (CIFG-LSTM).
const bool cifg_weights_all_or_none =
((input_to_input_weights != nullptr) &&
(recurrent_to_input_weights != nullptr)) ||
((input_to_input_weights == nullptr) &&
(recurrent_to_input_weights == nullptr));
TF_LITE_ENSURE(context, cifg_weights_all_or_none == true);
const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToInputWeightsTensor);
if (cell_to_input_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_input_weights->dims->data[0], n_cell);
}
const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToForgetWeightsTensor);
if (cell_to_forget_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_forget_weights->dims->data[0], n_cell);
}
const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(
context, node, lstm::full::kCellToOutputWeightsTensor);
if (cell_to_output_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_to_output_weights->dims->data[0], n_cell);
}
// Making sure the peephole weights are either all present or all absent.
const bool use_cifg = (input_to_input_weights == nullptr);
const bool peephole_weights_all_or_none =
((cell_to_input_weights != nullptr || use_cifg) &&
(cell_to_forget_weights != nullptr) &&
(cell_to_output_weights != nullptr)) ||
((cell_to_input_weights == nullptr) &&
(cell_to_forget_weights == nullptr) &&
(cell_to_output_weights == nullptr));
TF_LITE_ENSURE(context, peephole_weights_all_or_none == true);
// Make sure the input gate bias is present only when not a CIFG-LSTM.
const TfLiteTensor* input_gate_bias =
GetOptionalInputTensor(context, node, lstm::full::kInputGateBiasTensor);
if (use_cifg) {
TF_LITE_ENSURE_EQ(context, input_gate_bias, nullptr);
} else {
TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, input_gate_bias->dims->data[0], n_cell);
}
const TfLiteTensor* forget_gate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kForgetGateBiasTensor,
&forget_gate_bias));
TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, forget_gate_bias->dims->data[0], n_cell);
const TfLiteTensor* cell_gate_bias;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, lstm::full::kCellGateBiasTensor,
&cell_gate_bias));
TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_gate_bias->dims->data[0], n_cell);
const TfLiteTensor* output_gate_bias;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node, lstm::full::kOutputGateBiasTensor,
&output_gate_bias));
TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, output_gate_bias->dims->data[0], n_cell);
const TfLiteTensor* projection_weights = GetOptionalInputTensor(
context, node, lstm::full::kProjectionWeightsTensor);
if (projection_weights != nullptr) {
TF_LITE_ENSURE_EQ(context, projection_weights->dims->size, 2);
TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[0], n_output);
TF_LITE_ENSURE_EQ(context, projection_weights->dims->data[1], n_cell);
}
const TfLiteTensor* projection_bias =
GetOptionalInputTensor(context, node, lstm::full::kProjectionBiasTensor);
if (projection_bias != nullptr) {
TF_LITE_ENSURE_EQ(context, projection_bias->dims->size, 1);
TF_LITE_ENSURE_EQ(context, projection_bias->dims->data[0], n_output);
}
// Making sure the projection tensors are consistent:
// 1) If projection weight is not present, then projection bias should not be
// present.
// 2) If projection weight is present, then projection bias is optional.
// TODO(ghodrat): make sure this is correct.
const bool projecton_tensors_consistent =
((projection_weights != nullptr) || (projection_bias == nullptr));
TF_LITE_ENSURE(context, projecton_tensors_consistent == true);
if (is_layer_norm_lstm) {
const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(
context, node, lstm::full::kInputLayerNormCoefficientsTensor);
if (use_cifg) {
TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients, nullptr);
} else {
TF_LITE_ENSURE(context, input_layer_norm_coefficients != nullptr);
TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, input_layer_norm_coefficients->dims->data[0],
n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, input_layer_norm_coefficients->type,
kTfLiteFloat32);
}
const TfLiteTensor* forget_layer_norm_coefficients;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node,
lstm::full::kForgetLayerNormCoefficientsTensor,
&forget_layer_norm_coefficients));
TF_LITE_ENSURE_EQ(context, forget_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, forget_layer_norm_coefficients->dims->data[0],
n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, forget_layer_norm_coefficients->type,
kTfLiteFloat32);
const TfLiteTensor* cell_layer_norm_coefficients;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node,
lstm::full::kCellLayerNormCoefficientsTensor,
&cell_layer_norm_coefficients));
TF_LITE_ENSURE_EQ(context, cell_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, cell_layer_norm_coefficients->dims->data[0],
n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, cell_layer_norm_coefficients->type,
kTfLiteFloat32);
const TfLiteTensor* output_layer_norm_coefficients;
TF_LITE_ENSURE_OK(
context, GetInputSafe(context, node,
lstm::full::kOutputLayerNormCoefficientsTensor,
&output_layer_norm_coefficients));
TF_LITE_ENSURE_EQ(context, output_layer_norm_coefficients->dims->size, 1);
TF_LITE_ENSURE_EQ(context, output_layer_norm_coefficients->dims->data[0],
n_cell);
TF_LITE_ENSURE_TYPES_EQ(context, output_layer_norm_coefficients->type,
kTfLiteFloat32);
}
return kTfLiteOk;
}
|
47
| 0
|
TEST_P(SslSocketTest, Ipv4San) {
const std::string client_ctx_yaml = R"EOF(
common_tls_context:
validation_context:
trusted_ca:
filename: "{{ test_rundir }}/test/config/integration/certs/upstreamcacert.pem"
match_typed_subject_alt_names:
- san_type: IP_ADDRESS
matcher:
exact: "127.0.0.1"
)EOF";
const std::string server_ctx_yaml = R"EOF(
common_tls_context:
tls_certificates:
certificate_chain:
filename: "{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostcert.pem"
private_key:
filename: "{{ test_rundir }}/test/config/integration/certs/upstreamlocalhostkey.pem"
)EOF";
TestUtilOptions test_options(client_ctx_yaml, server_ctx_yaml, true, GetParam());
testUtil(test_options);
}
|
48
| 1
|
find_help_tags(
char_u *arg,
int *num_matches,
char_u ***matches,
int keep_lang)
{
char_u *s, *d;
int i;
// Specific tags that either have a specific replacement or won't go
// through the generic rules.
static char *(except_tbl[][2]) = {
{"*", "star"},
{"g*", "gstar"},
{"[*", "[star"},
{"]*", "]star"},
{":*", ":star"},
{"/*", "/star"},
{"/\\*", "/\\\\star"},
{"\"*", "quotestar"},
{"**", "starstar"},
{"cpo-*", "cpo-star"},
{"/\\(\\)", "/\\\\(\\\\)"},
{"/\\%(\\)", "/\\\\%(\\\\)"},
{"?", "?"},
{"??", "??"},
{":?", ":?"},
{"?<CR>", "?<CR>"},
{"g?", "g?"},
{"g?g?", "g?g?"},
{"g??", "g??"},
{"-?", "-?"},
{"q?", "q?"},
{"v_g?", "v_g?"},
{"/\\?", "/\\\\?"},
{"/\\z(\\)", "/\\\\z(\\\\)"},
{"\\=", "\\\\="},
{":s\\=", ":s\\\\="},
{"[count]", "\\[count]"},
{"[quotex]", "\\[quotex]"},
{"[range]", "\\[range]"},
{":[range]", ":\\[range]"},
{"[pattern]", "\\[pattern]"},
{"\\|", "\\\\bar"},
{"\\%$", "/\\\\%\\$"},
{"s/\\~", "s/\\\\\\~"},
{"s/\\U", "s/\\\\U"},
{"s/\\L", "s/\\\\L"},
{"s/\\1", "s/\\\\1"},
{"s/\\2", "s/\\\\2"},
{"s/\\3", "s/\\\\3"},
{"s/\\9", "s/\\\\9"},
{NULL, NULL}
};
static char *(expr_table[]) = {"!=?", "!~?", "<=?", "<?", "==?", "=~?",
">=?", ">?", "is?", "isnot?"};
int flags;
d = IObuff; // assume IObuff is long enough!
d[0] = NUL;
if (STRNICMP(arg, "expr-", 5) == 0)
{
// When the string starts with "expr-", contains '?', and matches
// the table, it is taken literally (but ~ is escaped). Otherwise '?'
// is recognized as a wildcard.
for (i = (int)ARRAY_LENGTH(expr_table); --i >= 0; )
if (STRCMP(arg + 5, expr_table[i]) == 0)
{
int si = 0, di = 0;
for (;;)
{
if (arg[si] == '~')
d[di++] = '\\';
d[di++] = arg[si];
if (arg[si] == NUL)
break;
++si;
}
break;
}
}
else
{
// Recognize a few exceptions to the rule. Some strings that contain
// '*' are changed to "star", otherwise '*' is recognized as a wildcard.
for (i = 0; except_tbl[i][0] != NULL; ++i)
if (STRCMP(arg, except_tbl[i][0]) == 0)
{
STRCPY(d, except_tbl[i][1]);
break;
}
}
if (d[0] == NUL) // no match in table
{
// Replace "\S" with "/\\S", etc. Otherwise every tag is matched.
// Also replace "\%^" and "\%(", they match every tag too.
// Also "\zs", "\z1", etc.
// Also "\@<", "\@=", "\@<=", etc.
// And also "\_$" and "\_^".
if (arg[0] == '\\'
&& ((arg[1] != NUL && arg[2] == NUL)
|| (vim_strchr((char_u *)"%_z@", arg[1]) != NULL
&& arg[2] != NUL)))
{
STRCPY(d, "/\\\\");
STRCPY(d + 3, arg + 1);
// Check for "/\\_$", should be "/\\_\$"
if (d[3] == '_' && d[4] == '$')
STRCPY(d + 4, "\\$");
}
else
{
// Replace:
// "[:...:]" with "\[:...:]"
// "[++...]" with "\[++...]"
// "\{" with "\\{" -- matching "} \}"
if ((arg[0] == '[' && (arg[1] == ':'
|| (arg[1] == '+' && arg[2] == '+')))
|| (arg[0] == '\\' && arg[1] == '{'))
*d++ = '\\';
// If tag starts with "('", skip the "(". Fixes CTRL-] on ('option'.
if (*arg == '(' && arg[1] == '\'')
arg++;
for (s = arg; *s; ++s)
{
// Replace "|" with "bar" and '"' with "quote" to match the name of
// the tags for these commands.
// Replace "*" with ".*" and "?" with "." to match command line
// completion.
// Insert a backslash before '~', '$' and '.' to avoid their
// special meaning.
if (d - IObuff > IOSIZE - 10) // getting too long!?
break;
switch (*s)
{
case '|': STRCPY(d, "bar");
d += 3;
continue;
case '"': STRCPY(d, "quote");
d += 5;
continue;
case '*': *d++ = '.';
break;
case '?': *d++ = '.';
continue;
case '$':
case '.':
case '~': *d++ = '\\';
break;
}
// Replace "^x" by "CTRL-X". Don't do this for "^_" to make
// ":help i_^_CTRL-D" work.
// Insert '-' before and after "CTRL-X" when applicable.
if (*s < ' ' || (*s == '^' && s[1] && (ASCII_ISALPHA(s[1])
|| vim_strchr((char_u *)"?@[\\]^", s[1]) != NULL)))
{
if (d > IObuff && d[-1] != '_' && d[-1] != '\\')
*d++ = '_'; // prepend a '_' to make x_CTRL-x
STRCPY(d, "CTRL-");
d += 5;
if (*s < ' ')
{
#ifdef EBCDIC
*d++ = CtrlChar(*s);
#else
*d++ = *s + '@';
#endif
if (d[-1] == '\\')
*d++ = '\\'; // double a backslash
}
else
*d++ = *++s;
if (s[1] != NUL && s[1] != '_')
*d++ = '_'; // append a '_'
continue;
}
else if (*s == '^') // "^" or "CTRL-^" or "^_"
*d++ = '\\';
// Insert a backslash before a backslash after a slash, for search
// pattern tags: "/\|" --> "/\\|".
else if (s[0] == '\\' && s[1] != '\\'
&& *arg == '/' && s == arg + 1)
*d++ = '\\';
// "CTRL-\_" -> "CTRL-\\_" to avoid the special meaning of "\_" in
// "CTRL-\_CTRL-N"
if (STRNICMP(s, "CTRL-\\_", 7) == 0)
{
STRCPY(d, "CTRL-\\\\");
d += 7;
s += 6;
}
*d++ = *s;
// If tag contains "({" or "([", tag terminates at the "(".
// This is for help on functions, e.g.: abs({expr}).
if (*s == '(' && (s[1] == '{' || s[1] =='['))
break;
// If tag starts with ', toss everything after a second '. Fixes
// CTRL-] on 'option'. (would include the trailing '.').
if (*s == '\'' && s > arg && *arg == '\'')
break;
// Also '{' and '}'.
if (*s == '}' && s > arg && *arg == '{')
break;
}
*d = NUL;
if (*IObuff == '`')
{
if (d > IObuff + 2 && d[-1] == '`')
{
// remove the backticks from `command`
mch_memmove(IObuff, IObuff + 1, STRLEN(IObuff));
d[-2] = NUL;
}
else if (d > IObuff + 3 && d[-2] == '`' && d[-1] == ',')
{
// remove the backticks and comma from `command`,
mch_memmove(IObuff, IObuff + 1, STRLEN(IObuff));
d[-3] = NUL;
}
else if (d > IObuff + 4 && d[-3] == '`'
&& d[-2] == '\\' && d[-1] == '.')
{
// remove the backticks and dot from `command`\.
mch_memmove(IObuff, IObuff + 1, STRLEN(IObuff));
d[-4] = NUL;
}
}
}
}
*matches = (char_u **)"";
*num_matches = 0;
flags = TAG_HELP | TAG_REGEXP | TAG_NAMES | TAG_VERBOSE | TAG_NO_TAGFUNC;
if (keep_lang)
flags |= TAG_KEEP_LANG;
if (find_tags(IObuff, num_matches, matches, flags, (int)MAXCOL, NULL) == OK
&& *num_matches > 0)
{
// Sort the matches found on the heuristic number that is after the
// tag name.
qsort((void *)*matches, (size_t)*num_matches,
sizeof(char_u *), help_compare);
// Delete more than TAG_MANY to reduce the size of the listing.
while (*num_matches > TAG_MANY)
vim_free((*matches)[--*num_matches]);
}
return OK;
}
|
49
| 0
|
static Image *ReadTIFFImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
#define ThrowTIFFException(severity,message) \
{ \
if (pixel_info != (MemoryInfo *) NULL) \
pixel_info=RelinquishVirtualMemory(pixel_info); \
if (quantum_info != (QuantumInfo *) NULL) \
quantum_info=DestroyQuantumInfo(quantum_info); \
TIFFClose(tiff); \
ThrowReaderException(severity,message); \
}
const char
*option;
float
*chromaticity,
x_position,
y_position,
x_resolution,
y_resolution;
Image
*image;
int
tiff_status;
MagickBooleanType
more_frames;
MagickSizeType
number_pixels;
MagickStatusType
status;
MemoryInfo
*pixel_info = (MemoryInfo *) NULL;
QuantumInfo
*quantum_info;
QuantumType
quantum_type;
ssize_t
i,
scanline_size,
y;
TIFF
*tiff;
TIFFMethodType
method;
uint16
compress_tag,
bits_per_sample,
endian,
extra_samples,
interlace,
max_sample_value,
min_sample_value,
orientation,
pages,
photometric,
*sample_info,
sample_format,
samples_per_pixel,
units,
value;
uint32
height,
rows_per_strip,
width;
unsigned char
*pixels;
void
*sans[4] = { NULL, NULL, NULL, NULL };
/*
Open image.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) SetMagickThreadValue(tiff_exception,exception);
tiff=TIFFClientOpen(image->filename,"rb",(thandle_t) image,TIFFReadBlob,
TIFFWriteBlob,TIFFSeekBlob,TIFFCloseBlob,TIFFGetBlobSize,TIFFMapBlob,
TIFFUnmapBlob);
if (tiff == (TIFF *) NULL)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
if (exception->severity > ErrorException)
{
TIFFClose(tiff);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (image_info->number_scenes != 0)
{
/*
Generate blank images for subimage specification (e.g. image.tif[4]).
We need to check the number of directories because it is possible that
the subimage(s) are stored in the photoshop profile.
*/
if (image_info->scene < (size_t) TIFFNumberOfDirectories(tiff))
{
for (i=0; i < (ssize_t) image_info->scene; i++)
{
status=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse;
if (status == MagickFalse)
{
TIFFClose(tiff);
image=DestroyImageList(image);
return((Image *) NULL);
}
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
TIFFClose(tiff);
image=DestroyImageList(image);
return((Image *) NULL);
}
image=SyncNextImageInList(image);
}
}
}
more_frames=MagickTrue;
do
{
/* TIFFPrintDirectory(tiff,stdout,MagickFalse); */
photometric=PHOTOMETRIC_RGB;
if ((TIFFGetField(tiff,TIFFTAG_IMAGEWIDTH,&width) != 1) ||
(TIFFGetField(tiff,TIFFTAG_IMAGELENGTH,&height) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_PHOTOMETRIC,&photometric,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_COMPRESSION,&compress_tag,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_FILLORDER,&endian,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_PLANARCONFIG,&interlace,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLESPERPIXEL,&samples_per_pixel,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_BITSPERSAMPLE,&bits_per_sample,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_SAMPLEFORMAT,&sample_format,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_MINSAMPLEVALUE,&min_sample_value,sans) != 1) ||
(TIFFGetFieldDefaulted(tiff,TIFFTAG_MAXSAMPLEVALUE,&max_sample_value,sans) != 1))
{
TIFFClose(tiff);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (((sample_format != SAMPLEFORMAT_IEEEFP) || (bits_per_sample != 64)) &&
((bits_per_sample <= 0) || (bits_per_sample > 32)))
{
TIFFClose(tiff);
ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel");
}
if (samples_per_pixel > MaxPixelChannels)
{
TIFFClose(tiff);
ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
}
if (sample_format == SAMPLEFORMAT_IEEEFP)
(void) SetImageProperty(image,"quantum:format","floating-point",
exception);
switch (photometric)
{
case PHOTOMETRIC_MINISBLACK:
{
(void) SetImageProperty(image,"tiff:photometric","min-is-black",
exception);
break;
}
case PHOTOMETRIC_MINISWHITE:
{
(void) SetImageProperty(image,"tiff:photometric","min-is-white",
exception);
break;
}
case PHOTOMETRIC_PALETTE:
{
(void) SetImageProperty(image,"tiff:photometric","palette",exception);
break;
}
case PHOTOMETRIC_RGB:
{
(void) SetImageProperty(image,"tiff:photometric","RGB",exception);
break;
}
case PHOTOMETRIC_CIELAB:
{
(void) SetImageProperty(image,"tiff:photometric","CIELAB",exception);
break;
}
case PHOTOMETRIC_LOGL:
{
(void) SetImageProperty(image,"tiff:photometric","CIE Log2(L)",
exception);
break;
}
case PHOTOMETRIC_LOGLUV:
{
(void) SetImageProperty(image,"tiff:photometric","LOGLUV",exception);
break;
}
#if defined(PHOTOMETRIC_MASK)
case PHOTOMETRIC_MASK:
{
(void) SetImageProperty(image,"tiff:photometric","MASK",exception);
break;
}
#endif
case PHOTOMETRIC_SEPARATED:
{
(void) SetImageProperty(image,"tiff:photometric","separated",exception);
break;
}
case PHOTOMETRIC_YCBCR:
{
(void) SetImageProperty(image,"tiff:photometric","YCBCR",exception);
break;
}
default:
{
(void) SetImageProperty(image,"tiff:photometric","unknown",exception);
break;
}
}
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Geometry: %ux%u",
(unsigned int) width,(unsigned int) height);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Interlace: %u",
interlace);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Bits per sample: %u",bits_per_sample);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Min sample value: %u",min_sample_value);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Max sample value: %u",max_sample_value);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Photometric "
"interpretation: %s",GetImageProperty(image,"tiff:photometric",
exception));
}
image->columns=(size_t) width;
image->rows=(size_t) height;
image->depth=(size_t) bits_per_sample;
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),"Image depth: %.20g",
(double) image->depth);
image->endian=MSBEndian;
if (endian == FILLORDER_LSB2MSB)
image->endian=LSBEndian;
#if defined(MAGICKCORE_HAVE_TIFFISBIGENDIAN)
if (TIFFIsBigEndian(tiff) == 0)
{
(void) SetImageProperty(image,"tiff:endian","lsb",exception);
image->endian=LSBEndian;
}
else
{
(void) SetImageProperty(image,"tiff:endian","msb",exception);
image->endian=MSBEndian;
}
#endif
if ((photometric == PHOTOMETRIC_MINISBLACK) ||
(photometric == PHOTOMETRIC_MINISWHITE))
image->colorspace=GRAYColorspace;
if (photometric == PHOTOMETRIC_SEPARATED)
image->colorspace=CMYKColorspace;
if (photometric == PHOTOMETRIC_CIELAB)
image->colorspace=LabColorspace;
if ((photometric == PHOTOMETRIC_YCBCR) && (compress_tag != COMPRESSION_JPEG))
image->colorspace=YCbCrColorspace;
status=TIFFGetProfiles(tiff,image,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
status=TIFFGetProperties(tiff,image,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
option=GetImageOption(image_info,"tiff:exif-properties");
if (IsStringFalse(option) == MagickFalse) /* enabled by default */
(void) TIFFGetEXIFProperties(tiff,image,exception);
option=GetImageOption(image_info,"tiff:gps-properties");
if (IsStringFalse(option) == MagickFalse) /* enabled by default */
(void) TIFFGetGPSProperties(tiff,image,exception);
if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XRESOLUTION,&x_resolution,sans) == 1) &&
(TIFFGetFieldDefaulted(tiff,TIFFTAG_YRESOLUTION,&y_resolution,sans) == 1))
{
image->resolution.x=x_resolution;
image->resolution.y=y_resolution;
}
if (TIFFGetFieldDefaulted(tiff,TIFFTAG_RESOLUTIONUNIT,&units,sans,sans) == 1)
{
if (units == RESUNIT_INCH)
image->units=PixelsPerInchResolution;
if (units == RESUNIT_CENTIMETER)
image->units=PixelsPerCentimeterResolution;
}
if ((TIFFGetFieldDefaulted(tiff,TIFFTAG_XPOSITION,&x_position,sans) == 1) &&
(TIFFGetFieldDefaulted(tiff,TIFFTAG_YPOSITION,&y_position,sans) == 1))
{
image->page.x=CastDoubleToLong(ceil(x_position*
image->resolution.x-0.5));
image->page.y=CastDoubleToLong(ceil(y_position*
image->resolution.y-0.5));
}
if (TIFFGetFieldDefaulted(tiff,TIFFTAG_ORIENTATION,&orientation,sans) == 1)
image->orientation=(OrientationType) orientation;
if (TIFFGetField(tiff,TIFFTAG_WHITEPOINT,&chromaticity) == 1)
{
if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0))
{
image->chromaticity.white_point.x=chromaticity[0];
image->chromaticity.white_point.y=chromaticity[1];
}
}
if (TIFFGetField(tiff,TIFFTAG_PRIMARYCHROMATICITIES,&chromaticity) == 1)
{
if ((chromaticity != (float *) NULL) && (*chromaticity != 0.0))
{
image->chromaticity.red_primary.x=chromaticity[0];
image->chromaticity.red_primary.y=chromaticity[1];
image->chromaticity.green_primary.x=chromaticity[2];
image->chromaticity.green_primary.y=chromaticity[3];
image->chromaticity.blue_primary.x=chromaticity[4];
image->chromaticity.blue_primary.y=chromaticity[5];
}
}
#if defined(MAGICKCORE_HAVE_TIFFISCODECCONFIGURED) || (TIFFLIB_VERSION > 20040919)
if ((compress_tag != COMPRESSION_NONE) &&
(TIFFIsCODECConfigured(compress_tag) == 0))
{
TIFFClose(tiff);
ThrowReaderException(CoderError,"CompressNotSupported");
}
#endif
switch (compress_tag)
{
case COMPRESSION_NONE: image->compression=NoCompression; break;
case COMPRESSION_CCITTFAX3: image->compression=FaxCompression; break;
case COMPRESSION_CCITTFAX4: image->compression=Group4Compression; break;
case COMPRESSION_JPEG:
{
image->compression=JPEGCompression;
#if defined(JPEG_SUPPORT)
{
char
sampling_factor[MagickPathExtent];
uint16
horizontal,
vertical;
tiff_status=TIFFGetField(tiff,TIFFTAG_YCBCRSUBSAMPLING,&horizontal,
&vertical);
if (tiff_status == 1)
{
(void) FormatLocaleString(sampling_factor,MagickPathExtent,
"%dx%d",horizontal,vertical);
(void) SetImageProperty(image,"jpeg:sampling-factor",
sampling_factor,exception);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Sampling Factors: %s",sampling_factor);
}
}
#endif
break;
}
case COMPRESSION_OJPEG: image->compression=JPEGCompression; break;
#if defined(COMPRESSION_LZMA)
case COMPRESSION_LZMA: image->compression=LZMACompression; break;
#endif
case COMPRESSION_LZW: image->compression=LZWCompression; break;
case COMPRESSION_DEFLATE: image->compression=ZipCompression; break;
case COMPRESSION_ADOBE_DEFLATE: image->compression=ZipCompression; break;
#if defined(COMPRESSION_WEBP)
case COMPRESSION_WEBP: image->compression=WebPCompression; break;
#endif
#if defined(COMPRESSION_ZSTD)
case COMPRESSION_ZSTD: image->compression=ZstdCompression; break;
#endif
default: image->compression=RLECompression; break;
}
quantum_info=(QuantumInfo *) NULL;
if ((photometric == PHOTOMETRIC_PALETTE) &&
(pow(2.0,1.0*bits_per_sample) <= MaxColormapSize))
{
size_t
colors;
colors=(size_t) GetQuantumRange(bits_per_sample)+1;
if (AcquireImageColormap(image,colors,exception) == MagickFalse)
{
TIFFClose(tiff);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
}
value=(unsigned short) image->scene;
if (TIFFGetFieldDefaulted(tiff,TIFFTAG_PAGENUMBER,&value,&pages,sans) == 1)
image->scene=value;
if (image->storage_class == PseudoClass)
{
size_t
range;
uint16
*blue_colormap,
*green_colormap,
*red_colormap;
/*
Initialize colormap.
*/
tiff_status=TIFFGetField(tiff,TIFFTAG_COLORMAP,&red_colormap,
&green_colormap,&blue_colormap);
if (tiff_status == 1)
{
if ((red_colormap != (uint16 *) NULL) &&
(green_colormap != (uint16 *) NULL) &&
(blue_colormap != (uint16 *) NULL))
{
range=255; /* might be old style 8-bit colormap */
for (i=0; i < (ssize_t) image->colors; i++)
if ((red_colormap[i] >= 256) || (green_colormap[i] >= 256) ||
(blue_colormap[i] >= 256))
{
range=65535;
break;
}
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].red=ClampToQuantum(((double)
QuantumRange*red_colormap[i])/range);
image->colormap[i].green=ClampToQuantum(((double)
QuantumRange*green_colormap[i])/range);
image->colormap[i].blue=ClampToQuantum(((double)
QuantumRange*blue_colormap[i])/range);
}
}
}
}
if (image_info->ping != MagickFalse)
{
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
goto next_tiff_frame;
}
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
status=SetImageColorspace(image,image->colorspace,exception);
status&=ResetImagePixels(image,exception);
if (status == MagickFalse)
{
TIFFClose(tiff);
return(DestroyImageList(image));
}
/*
Allocate memory for the image and pixel buffer.
*/
quantum_info=AcquireQuantumInfo(image_info,image);
if (quantum_info == (QuantumInfo *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
if (sample_format == SAMPLEFORMAT_UINT)
status=SetQuantumFormat(image,quantum_info,UnsignedQuantumFormat);
if (sample_format == SAMPLEFORMAT_INT)
status=SetQuantumFormat(image,quantum_info,SignedQuantumFormat);
if (sample_format == SAMPLEFORMAT_IEEEFP)
status=SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
if (status == MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
status=MagickTrue;
switch (photometric)
{
case PHOTOMETRIC_MINISBLACK:
{
quantum_info->min_is_white=MagickFalse;
break;
}
case PHOTOMETRIC_MINISWHITE:
{
quantum_info->min_is_white=MagickTrue;
break;
}
default:
break;
}
extra_samples=0;
tiff_status=TIFFGetFieldDefaulted(tiff,TIFFTAG_EXTRASAMPLES,&extra_samples,
&sample_info,sans);
if (tiff_status == 1)
{
(void) SetImageProperty(image,"tiff:alpha","unspecified",exception);
if (extra_samples == 0)
{
if ((samples_per_pixel == 4) && (photometric == PHOTOMETRIC_RGB))
image->alpha_trait=BlendPixelTrait;
}
else
for (i=0; i < extra_samples; i++)
{
image->alpha_trait=BlendPixelTrait;
if (sample_info[i] == EXTRASAMPLE_ASSOCALPHA)
{
SetQuantumAlphaType(quantum_info,AssociatedQuantumAlpha);
(void) SetImageProperty(image,"tiff:alpha","associated",
exception);
}
else
if (sample_info[i] == EXTRASAMPLE_UNASSALPHA)
{
SetQuantumAlphaType(quantum_info,DisassociatedQuantumAlpha);
(void) SetImageProperty(image,"tiff:alpha","unassociated",
exception);
}
}
}
if (image->alpha_trait != UndefinedPixelTrait)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
method=ReadGenericMethod;
rows_per_strip=(uint32) image->rows;
if (TIFFGetField(tiff,TIFFTAG_ROWSPERSTRIP,&rows_per_strip) == 1)
{
char
buffer[MagickPathExtent];
(void) FormatLocaleString(buffer,MagickPathExtent,"%u",
(unsigned int) rows_per_strip);
(void) SetImageProperty(image,"tiff:rows-per-strip",buffer,exception);
method=ReadStripMethod;
if (rows_per_strip > (uint32) image->rows)
rows_per_strip=(uint32) image->rows;
}
if (TIFFIsTiled(tiff) != MagickFalse)
{
uint32
columns,
rows;
if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) ||
(TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1))
ThrowTIFFException(CoderError,"ImageIsNotTiled");
if ((AcquireMagickResource(WidthResource,columns) == MagickFalse) ||
(AcquireMagickResource(HeightResource,rows) == MagickFalse))
ThrowTIFFException(ImageError,"WidthOrHeightExceedsLimit");
method=ReadTileMethod;
}
if ((photometric == PHOTOMETRIC_LOGLUV) ||
(compress_tag == COMPRESSION_CCITTFAX3))
method=ReadGenericMethod;
if (image->compression == JPEGCompression)
method=GetJPEGMethod(image,tiff,photometric,bits_per_sample,
samples_per_pixel);
quantum_info->endian=LSBEndian;
scanline_size=TIFFScanlineSize(tiff);
if (scanline_size <= 0)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
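/*
  Sanity-check the scanline size against an estimate derived from the image
  geometry before allocating the pixel buffer.
*/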
number_pixels=MagickMax((MagickSizeType) image->columns*samples_per_pixel*
pow(2.0,ceil(log(bits_per_sample)/log(2.0))),image->columns*
rows_per_strip);
if ((double) scanline_size > 1.5*number_pixels)
ThrowTIFFException(CorruptImageError,"CorruptImage");
number_pixels=MagickMax((MagickSizeType) scanline_size,number_pixels);
pixel_info=AcquireVirtualMemory(number_pixels,sizeof(uint32));
if (pixel_info == (MemoryInfo *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
(void) memset(pixels,0,number_pixels*sizeof(uint32));
quantum_type=GrayQuantum;
if (image->storage_class == PseudoClass)
quantum_type=IndexQuantum;
if (interlace != PLANARCONFIG_SEPARATE)
{
size_t
pad;
pad=(size_t) MagickMax((ssize_t) samples_per_pixel-1,0);
if (image->alpha_trait != UndefinedPixelTrait)
{
if (image->storage_class == PseudoClass)
quantum_type=IndexAlphaQuantum;
else
quantum_type=samples_per_pixel == 1 ? AlphaQuantum :
GrayAlphaQuantum;
}
if ((samples_per_pixel > 2) && (interlace != PLANARCONFIG_SEPARATE))
{
quantum_type=RGBQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-3,0);
if (image->alpha_trait != UndefinedPixelTrait)
{
quantum_type=RGBAQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0);
}
if (image->colorspace == CMYKColorspace)
{
quantum_type=CMYKQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-4,0);
if (image->alpha_trait != UndefinedPixelTrait)
{
quantum_type=CMYKAQuantum;
pad=(size_t) MagickMax((size_t) samples_per_pixel-5,0);
}
}
status=SetQuantumPad(image,quantum_info,pad*((bits_per_sample+7) >>
3));
if (status == MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
}
}
switch (method)
{
case ReadYCCKMethod:
{
/*
Convert YCC TIFF image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
unsigned char
*p;
tiff_status=TIFFReadPixels(tiff,0,y,(char *) pixels);
if (tiff_status == -1)
break;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
p=pixels;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelCyan(image,ScaleCharToQuantum(ClampYCC((double) *p+
(1.402*(double) *(p+2))-179.456)),q);
SetPixelMagenta(image,ScaleCharToQuantum(ClampYCC((double) *p-
(0.34414*(double) *(p+1))-(0.71414*(double ) *(p+2))+
135.45984)),q);
SetPixelYellow(image,ScaleCharToQuantum(ClampYCC((double) *p+
(1.772*(double) *(p+1))-226.816)),q);
SetPixelBlack(image,ScaleCharToQuantum((unsigned char) *(p+3)),q);
q+=GetPixelChannels(image);
p+=4;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case ReadStripMethod:
{
unsigned char
*p;
size_t
extent;
ssize_t
stride,
strip_id;
tsize_t
strip_size;
unsigned char
*strip_pixels;
/*
Convert stripped TIFF image.
*/
extent=4*TIFFStripSize(tiff);
#if defined(TIFF_VERSION_BIG)
extent+=image->columns*sizeof(uint64);
#else
extent+=image->columns*sizeof(uint32);
#endif
strip_pixels=(unsigned char *) AcquireQuantumMemory(extent,
sizeof(*strip_pixels));
if (strip_pixels == (unsigned char *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(strip_pixels,0,extent*sizeof(*strip_pixels));
stride=TIFFVStripSize(tiff,1);
strip_id=0;
p=strip_pixels;
for (i=0; i < (ssize_t) samples_per_pixel; i++)
{
size_t
rows_remaining;
switch (i)
{
case 0: break;
case 1: quantum_type=GreenQuantum; break;
case 2: quantum_type=BlueQuantum; break;
case 3:
{
quantum_type=AlphaQuantum;
if (image->colorspace == CMYKColorspace)
quantum_type=BlackQuantum;
break;
}
case 4: quantum_type=AlphaQuantum; break;
default: break;
}
rows_remaining=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
*magick_restrict q;
q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
if (rows_remaining == 0)
{
strip_size=TIFFReadEncodedStrip(tiff,strip_id,strip_pixels,
TIFFStripSize(tiff));
if (strip_size == -1)
break;
rows_remaining=rows_per_strip;
if ((y+rows_per_strip) > (ssize_t) image->rows)
rows_remaining=(rows_per_strip-(y+rows_per_strip-
image->rows));
p=strip_pixels;
strip_id++;
}
(void) ImportQuantumPixels(image,(CacheView *) NULL,
quantum_info,quantum_type,p,exception);
p+=stride;
rows_remaining--;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE))
break;
}
strip_pixels=(unsigned char *) RelinquishMagickMemory(strip_pixels);
break;
}
case ReadTileMethod:
{
unsigned char
*p;
size_t
extent;
uint32
columns,
rows;
unsigned char
*tile_pixels;
/*
Convert tiled TIFF image.
*/
if ((TIFFGetField(tiff,TIFFTAG_TILEWIDTH,&columns) != 1) ||
(TIFFGetField(tiff,TIFFTAG_TILELENGTH,&rows) != 1))
ThrowTIFFException(CoderError,"ImageIsNotTiled");
number_pixels=(MagickSizeType) columns*rows;
if (HeapOverflowSanityCheck(rows,sizeof(*tile_pixels)) != MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
extent=TIFFTileSize(tiff);
#if defined(TIFF_VERSION_BIG)
extent+=columns*sizeof(uint64);
#else
extent+=columns*sizeof(uint32);
#endif
tile_pixels=(unsigned char *) AcquireQuantumMemory(extent,
sizeof(*tile_pixels));
if (tile_pixels == (unsigned char *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(tile_pixels,0,extent*sizeof(*tile_pixels));
for (i=0; i < (ssize_t) samples_per_pixel; i++)
{
switch (i)
{
case 0: break;
case 1: quantum_type=GreenQuantum; break;
case 2: quantum_type=BlueQuantum; break;
case 3:
{
quantum_type=AlphaQuantum;
if (image->colorspace == CMYKColorspace)
quantum_type=BlackQuantum;
break;
}
case 4: quantum_type=AlphaQuantum; break;
default: break;
}
for (y=0; y < (ssize_t) image->rows; y+=rows)
{
ssize_t
x;
size_t
rows_remaining;
rows_remaining=image->rows-y;
if ((ssize_t) (y+rows) < (ssize_t) image->rows)
rows_remaining=rows;
for (x=0; x < (ssize_t) image->columns; x+=columns)
{
size_t
columns_remaining,
row;
columns_remaining=image->columns-x;
if ((ssize_t) (x+columns) < (ssize_t) image->columns)
columns_remaining=columns;
if (TIFFReadTile(tiff,tile_pixels,(uint32) x,(uint32) y,0,i) == 0)
break;
p=tile_pixels;
for (row=0; row < rows_remaining; row++)
{
Quantum
*magick_restrict q;
q=GetAuthenticPixels(image,x,y+row,columns_remaining,1,
exception);
if (q == (Quantum *) NULL)
break;
(void) ImportQuantumPixels(image,(CacheView *) NULL,
quantum_info,quantum_type,p,exception);
p+=TIFFTileRowSize(tiff);
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
}
}
if ((samples_per_pixel > 1) && (interlace != PLANARCONFIG_SEPARATE))
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) i,
samples_per_pixel);
if (status == MagickFalse)
break;
}
}
tile_pixels=(unsigned char *) RelinquishMagickMemory(tile_pixels);
break;
}
case ReadGenericMethod:
default:
{
MemoryInfo
*generic_info = (MemoryInfo * ) NULL;
uint32
*p;
uint32
*pixels;
/*
Convert generic TIFF image.
*/
if (HeapOverflowSanityCheck(image->rows,sizeof(*pixels)) != MagickFalse)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
number_pixels=(MagickSizeType) image->columns*image->rows;
#if defined(TIFF_VERSION_BIG)
number_pixels+=image->columns*sizeof(uint64);
#else
number_pixels+=image->columns*sizeof(uint32);
#endif
generic_info=AcquireVirtualMemory(number_pixels,sizeof(uint32));
if (generic_info == (MemoryInfo *) NULL)
ThrowTIFFException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(uint32 *) GetVirtualMemoryBlob(generic_info);
(void) TIFFReadRGBAImage(tiff,(uint32) image->columns,(uint32)
image->rows,(uint32 *) pixels,0);
p=pixels+(image->columns*image->rows)-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
q+=GetPixelChannels(image)*(image->columns-1);
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(image,ScaleCharToQuantum((unsigned char)
TIFFGetR(*p)),q);
SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
TIFFGetG(*p)),q);
SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
TIFFGetB(*p)),q);
if (image->alpha_trait != UndefinedPixelTrait)
SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
TIFFGetA(*p)),q);
p--;
q-=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
generic_info=RelinquishVirtualMemory(generic_info);
break;
}
}
pixel_info=RelinquishVirtualMemory(pixel_info);
SetQuantumImageType(image,quantum_type);
next_tiff_frame:
if (quantum_info != (QuantumInfo *) NULL)
quantum_info=DestroyQuantumInfo(quantum_info);
if (photometric == PHOTOMETRIC_CIELAB)
DecodeLabImage(image,exception);
if ((photometric == PHOTOMETRIC_LOGL) ||
(photometric == PHOTOMETRIC_MINISBLACK) ||
(photometric == PHOTOMETRIC_MINISWHITE))
{
image->type=GrayscaleType;
if (bits_per_sample == 1)
image->type=BilevelType;
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
more_frames=TIFFReadDirectory(tiff) != 0 ? MagickTrue : MagickFalse;
if (more_frames != MagickFalse)
{
/*
Allocate next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,image->scene-1,
image->scene);
if (status == MagickFalse)
break;
}
} while ((status != MagickFalse) && (more_frames != MagickFalse));
TIFFClose(tiff);
if (status != MagickFalse)
TIFFReadPhotoshopLayers(image_info,image,exception);
if ((image_info->number_scenes != 0) &&
(image_info->scene >= GetImageListLength(image)))
status=MagickFalse;
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
}
|
50
| 1
|
static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl2_adapter *adapter;
static int cards_found;
unsigned long mmio_start;
int mmio_len;
int err;
cards_found = 0;
err = pci_enable_device(pdev);
if (err)
return err;
/*
* atl2 is a shared-high-32-bit device, so we're stuck with 32-bit DMA
* until the kernel has the proper infrastructure to support 64-bit DMA
* on these devices.
*/
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
goto err_dma;
}
/* Mark all PCI regions associated with PCI device
* pdev as being reserved by owner atl2_driver_name */
err = pci_request_regions(pdev, atl2_driver_name);
if (err)
goto err_pci_reg;
/* Enables bus-mastering on the device and calls
* pcibios_set_master to do the needed arch specific settings */
pci_set_master(pdev);
err = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct atl2_adapter));
if (!netdev)
goto err_alloc_etherdev;
SET_NETDEV_DEV(netdev, &pdev->dev);
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->hw.back = adapter;
mmio_start = pci_resource_start(pdev, 0x0);
mmio_len = pci_resource_len(pdev, 0x0);
adapter->hw.mem_rang = (u32)mmio_len;
adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
if (!adapter->hw.hw_addr) {
err = -EIO;
goto err_ioremap;
}
atl2_setup_pcicmd(pdev);
netdev->netdev_ops = &atl2_netdev_ops;
netdev->ethtool_ops = &atl2_ethtool_ops;
netdev->watchdog_timeo = 5 * HZ;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
adapter->bd_number = cards_found;
adapter->pci_using_64 = false;
/* setup the private structure */
err = atl2_sw_init(adapter);
if (err)
goto err_sw_init;
err = -EIO;
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
/* Init PHY as early as possible due to power saving issue */
atl2_phy_init(&adapter->hw);
/* reset the controller to
* put the device in a known good starting state */
if (atl2_reset_hw(&adapter->hw)) {
err = -EIO;
goto err_reset;
}
/* copy the MAC address out of the EEPROM */
atl2_read_mac_addr(&adapter->hw);
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
goto err_eeprom;
}
atl2_check_options(adapter);
setup_timer(&adapter->watchdog_timer, atl2_watchdog,
(unsigned long)adapter);
setup_timer(&adapter->phy_config_timer, atl2_phy_config,
(unsigned long)adapter);
INIT_WORK(&adapter->reset_task, atl2_reset_task);
INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
strcpy(netdev->name, "eth%d"); /* ?? */
err = register_netdev(netdev);
if (err)
goto err_register;
/* assume we have no link for now */
netif_carrier_off(netdev);
netif_stop_queue(netdev);
cards_found++;
return 0;
err_reset:
err_register:
err_sw_init:
err_eeprom:
iounmap(adapter->hw.hw_addr);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
return err;
}
|
51
| 0
|
static struct flb_input_instance *find_input(struct flb_hs *hs, const char *name, size_t nlen)
{
struct mk_list *head;
struct flb_input_instance *in;
mk_list_foreach(head, &hs->config->inputs) {
in = mk_list_entry(head, struct flb_input_instance, _head);
if (strlen(in->name) != nlen) {
continue;
}
if (strncmp(name, in->name, nlen) == 0) {
return in;
}
if (in->alias) {
if (strcmp(name, in->alias) == 0) {
return in;
}
}
}
return NULL;
}
|
52
| 0
|
void nfc_llcp_unregister_device(struct nfc_dev *dev)
{
struct nfc_llcp_local *local = nfc_llcp_remove_local(dev);
if (local == NULL) {
pr_debug("No such device\n");
return;
}
local_cleanup(local);
nfc_llcp_local_put(local);
}
|
53
| 0
|
bool UnboundedHelloWorldPubSubType::deserialize(
SerializedPayload_t* payload,
void* data)
{
try
{
// Convert DATA to pointer of your type
UnboundedHelloWorld* p_type = static_cast<UnboundedHelloWorld*>(data);
// Object that manages the raw buffer.
eprosima::fastcdr::FastBuffer fastbuffer(reinterpret_cast<char*>(payload->data), payload->length);
// Object that deserializes the data.
eprosima::fastcdr::Cdr deser(fastbuffer, eprosima::fastcdr::Cdr::DEFAULT_ENDIAN
#if FASTCDR_VERSION_MAJOR == 1
, eprosima::fastcdr::Cdr::CdrType::DDS_CDR
#endif // FASTCDR_VERSION_MAJOR == 1
);
// Deserialize encapsulation.
deser.read_encapsulation();
payload->encapsulation = deser.endianness() == eprosima::fastcdr::Cdr::BIG_ENDIANNESS ? CDR_BE : CDR_LE;
// Deserialize the object.
deser >> *p_type;
}
catch (eprosima::fastcdr::exception::Exception& /*exception*/)
{
return false;
}
return true;
}
|
54
| 0
|
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next)
{
if (!vma->anon_vma || vma->vm_ops)
return;
__vma_adjust_trans_huge(vma, start, end, adjust_next);
}
|
55
| 0
|
int ParseWave64HeaderConfig (FILE *infile, char *infilename, char *fourcc, WavpackContext *wpc, WavpackConfig *config)
{
int64_t total_samples = 0, infilesize;
Wave64ChunkHeader chunk_header;
Wave64FileHeader filehdr;
WaveHeader WaveHeader;
int format_chunk = 0;
uint32_t bcount;
infilesize = DoGetFileSize (infile);
memcpy (&filehdr, fourcc, 4);
if (!DoReadFile (infile, ((char *) &filehdr) + 4, sizeof (Wave64FileHeader) - 4, &bcount) ||
bcount != sizeof (Wave64FileHeader) - 4 || memcmp (filehdr.ckID, riff_guid, sizeof (riff_guid)) ||
memcmp (filehdr.formType, wave_guid, sizeof (wave_guid))) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &filehdr, sizeof (filehdr))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
#if 1 // this might be a little too picky...
WavpackLittleEndianToNative (&filehdr, Wave64ChunkHeaderFormat);
if (infilesize && !(config->qmode & QMODE_IGNORE_LENGTH) &&
filehdr.ckSize && filehdr.ckSize + 1 && filehdr.ckSize != infilesize) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
#endif
// loop through all elements of the wave64 header
// (until the data chunk) and copy them to the output file
while (1) {
if (!DoReadFile (infile, &chunk_header, sizeof (Wave64ChunkHeader), &bcount) ||
bcount != sizeof (Wave64ChunkHeader)) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &chunk_header, sizeof (Wave64ChunkHeader))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackLittleEndianToNative (&chunk_header, Wave64ChunkHeaderFormat);
chunk_header.ckSize -= sizeof (chunk_header);
// if it's the format chunk, we want to get some info out of there and
// make sure it's a .wav file we can handle
if (!memcmp (chunk_header.ckID, fmt_guid, sizeof (fmt_guid))) {
int supported = TRUE, format;
if (format_chunk++) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
chunk_header.ckSize = (chunk_header.ckSize + 7) & ~7L;
if (chunk_header.ckSize < 16 || chunk_header.ckSize > sizeof (WaveHeader) ||
!DoReadFile (infile, &WaveHeader, (uint32_t) chunk_header.ckSize, &bcount) ||
bcount != chunk_header.ckSize) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
else if (!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, &WaveHeader, (uint32_t) chunk_header.ckSize)) {
error_line ("%s", WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
WavpackLittleEndianToNative (&WaveHeader, WaveHeaderFormat);
if (debug_logging_mode) {
error_line ("format tag size = %d", chunk_header.ckSize);
error_line ("FormatTag = %x, NumChannels = %d, BitsPerSample = %d",
WaveHeader.FormatTag, WaveHeader.NumChannels, WaveHeader.BitsPerSample);
error_line ("BlockAlign = %d, SampleRate = %d, BytesPerSecond = %d",
WaveHeader.BlockAlign, WaveHeader.SampleRate, WaveHeader.BytesPerSecond);
if (chunk_header.ckSize > 16)
error_line ("cbSize = %d, ValidBitsPerSample = %d", WaveHeader.cbSize,
WaveHeader.ValidBitsPerSample);
if (chunk_header.ckSize > 20)
error_line ("ChannelMask = %x, SubFormat = %d",
WaveHeader.ChannelMask, WaveHeader.SubFormat);
}
if (chunk_header.ckSize > 16 && WaveHeader.cbSize == 2)
config->qmode |= QMODE_ADOBE_MODE;
format = (WaveHeader.FormatTag == 0xfffe && chunk_header.ckSize == 40) ?
WaveHeader.SubFormat : WaveHeader.FormatTag;
config->bits_per_sample = (chunk_header.ckSize == 40 && WaveHeader.ValidBitsPerSample) ?
WaveHeader.ValidBitsPerSample : WaveHeader.BitsPerSample;
if (format != 1 && format != 3)
supported = FALSE;
if (format == 3 && config->bits_per_sample != 32)
supported = FALSE;
if (!WaveHeader.NumChannels || WaveHeader.NumChannels > 256 ||
WaveHeader.BlockAlign / WaveHeader.NumChannels < (config->bits_per_sample + 7) / 8 ||
WaveHeader.BlockAlign / WaveHeader.NumChannels > 4 ||
WaveHeader.BlockAlign % WaveHeader.NumChannels)
supported = FALSE;
if (config->bits_per_sample < 1 || config->bits_per_sample > 32)
supported = FALSE;
if (!supported) {
error_line ("%s is an unsupported .W64 format!", infilename);
return WAVPACK_SOFT_ERROR;
}
if (chunk_header.ckSize < 40) {
if (!config->channel_mask && !(config->qmode & QMODE_CHANS_UNASSIGNED)) {
if (WaveHeader.NumChannels <= 2)
config->channel_mask = 0x5 - WaveHeader.NumChannels;
else if (WaveHeader.NumChannels <= 18)
config->channel_mask = (1 << WaveHeader.NumChannels) - 1;
else
config->channel_mask = 0x3ffff;
}
}
else if (WaveHeader.ChannelMask && (config->channel_mask || (config->qmode & QMODE_CHANS_UNASSIGNED))) {
error_line ("this W64 file already has channel order information!");
return WAVPACK_SOFT_ERROR;
}
else if (WaveHeader.ChannelMask)
config->channel_mask = WaveHeader.ChannelMask;
if (format == 3)
config->float_norm_exp = 127;
else if ((config->qmode & QMODE_ADOBE_MODE) &&
WaveHeader.BlockAlign / WaveHeader.NumChannels == 4) {
if (WaveHeader.BitsPerSample == 24)
config->float_norm_exp = 127 + 23;
else if (WaveHeader.BitsPerSample == 32)
config->float_norm_exp = 127 + 15;
}
if (debug_logging_mode) {
if (config->float_norm_exp == 127)
error_line ("data format: normalized 32-bit floating point");
else
error_line ("data format: %d-bit integers stored in %d byte(s)",
config->bits_per_sample, WaveHeader.BlockAlign / WaveHeader.NumChannels);
}
}
else if (!memcmp (chunk_header.ckID, data_guid, sizeof (data_guid))) { // on the data chunk, get size and exit loop
if (!WaveHeader.NumChannels) { // make sure we saw "fmt" chunk
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
if ((config->qmode & QMODE_IGNORE_LENGTH) || chunk_header.ckSize <= 0) {
config->qmode |= QMODE_IGNORE_LENGTH;
if (infilesize && DoGetFilePosition (infile) != -1)
total_samples = (infilesize - DoGetFilePosition (infile)) / WaveHeader.BlockAlign;
else
total_samples = -1;
}
else {
if (infilesize && infilesize - chunk_header.ckSize > 16777216) {
error_line ("this .W64 file has over 16 MB of extra RIFF data, probably is corrupt!");
return WAVPACK_SOFT_ERROR;
}
total_samples = chunk_header.ckSize / WaveHeader.BlockAlign;
if (!total_samples) {
error_line ("this .W64 file has no audio samples, probably is corrupt!");
return WAVPACK_SOFT_ERROR;
}
if (total_samples > MAX_WAVPACK_SAMPLES) {
error_line ("%s has too many samples for WavPack!", infilename);
return WAVPACK_SOFT_ERROR;
}
}
config->bytes_per_sample = WaveHeader.BlockAlign / WaveHeader.NumChannels;
config->num_channels = WaveHeader.NumChannels;
config->sample_rate = WaveHeader.SampleRate;
break;
}
else { // just copy unknown chunks to output file
int bytes_to_copy = (chunk_header.ckSize + 7) & ~7L;
char *buff;
if (bytes_to_copy < 0 || bytes_to_copy > 4194304) {
error_line ("%s is not a valid .W64 file!", infilename);
return WAVPACK_SOFT_ERROR;
}
buff = malloc (bytes_to_copy);
if (debug_logging_mode)
error_line ("extra unknown chunk \"%c%c%c%c\" of %d bytes",
chunk_header.ckID [0], chunk_header.ckID [1], chunk_header.ckID [2],
chunk_header.ckID [3], chunk_header.ckSize);
if (!DoReadFile (infile, buff, bytes_to_copy, &bcount) ||
bcount != bytes_to_copy ||
(!(config->qmode & QMODE_NO_STORE_WRAPPER) &&
!WavpackAddWrapper (wpc, buff, bytes_to_copy))) {
error_line ("%s", WavpackGetErrorMessage (wpc));
free (buff);
return WAVPACK_SOFT_ERROR;
}
free (buff);
}
}
if (!WavpackSetConfiguration64 (wpc, config, total_samples, NULL)) {
error_line ("%s: %s", infilename, WavpackGetErrorMessage (wpc));
return WAVPACK_SOFT_ERROR;
}
return WAVPACK_NO_ERROR;
}
|
56
| 0
|
void LiInitializeConnectionCallbacks(PCONNECTION_LISTENER_CALLBACKS clCallbacks) {
memset(clCallbacks, 0, sizeof(*clCallbacks));
}
|
57
| 0
|
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct in6_addr *saddr = NULL, *final_p, final;
struct ipv6_txoptions *opt;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
int err;
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (usin->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
memset(&fl6, 0, sizeof(fl6));
if (np->sndflow) {
fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
IP6_ECN_flow_init(fl6.flowlabel);
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
struct ip6_flowlabel *flowlabel;
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
fl6_sock_release(flowlabel);
}
}
/*
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
if (ipv6_addr_any(&usin->sin6_addr))
usin->sin6_addr.s6_addr[15] = 0x1;
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type & IPV6_ADDR_MULTICAST)
return -ENETUNREACH;
if (addr_type&IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
usin->sin6_scope_id) {
/* If interface is set while binding, indices
* must coincide.
*/
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != usin->sin6_scope_id)
return -EINVAL;
sk->sk_bound_dev_if = usin->sin6_scope_id;
}
/* Connect to link-local address requires an interface */
if (!sk->sk_bound_dev_if)
return -EINVAL;
}
if (tp->rx_opt.ts_recent_stamp &&
!ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
tp->write_seq = 0;
}
sk->sk_v6_daddr = usin->sin6_addr;
np->flow_label = fl6.flowlabel;
/*
* TCP over IPv4
*/
if (addr_type == IPV6_ADDR_MAPPED) {
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;
SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
sin.sin_family = AF_INET;
sin.sin_port = usin->sin6_port;
sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
icsk->icsk_af_ops = &ipv6_mapped;
sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
if (err) {
icsk->icsk_ext_hdr_len = exthdrlen;
icsk->icsk_af_ops = &ipv6_specific;
sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
tp->af_specific = &tcp_sock_ipv6_specific;
#endif
goto failure;
}
np->saddr = sk->sk_v6_rcv_saddr;
return err;
}
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
saddr = &sk->sk_v6_rcv_saddr;
fl6.flowi6_proto = IPPROTO_TCP;
fl6.daddr = sk->sk_v6_daddr;
fl6.saddr = saddr ? *saddr : np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto failure;
}
if (!saddr) {
saddr = &fl6.saddr;
sk->sk_v6_rcv_saddr = *saddr;
}
/* set the source address */
np->saddr = *saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
sk->sk_gso_type = SKB_GSO_TCPV6;
__ip6_dst_store(sk, dst, NULL, NULL);
if (tcp_death_row.sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp &&
ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
tcp_fetch_timewait_stamp(sk, dst);
icsk->icsk_ext_hdr_len = 0;
if (opt)
icsk->icsk_ext_hdr_len = opt->opt_flen +
opt->opt_nflen;
tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
inet->inet_dport = usin->sin6_port;
tcp_set_state(sk, TCP_SYN_SENT);
err = inet6_hash_connect(&tcp_death_row, sk);
if (err)
goto late_failure;
sk_set_txhash(sk);
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
sk->sk_v6_daddr.s6_addr32,
inet->inet_sport,
inet->inet_dport);
err = tcp_connect(sk);
if (err)
goto late_failure;
return 0;
late_failure:
tcp_set_state(sk, TCP_CLOSE);
__sk_dst_reset(sk);
failure:
inet->inet_dport = 0;
sk->sk_route_caps = 0;
return err;
}
|
58
| 0
|
error_t httpParseParam(const char_t **pos, HttpParam *param)
{
error_t error;
size_t i;
uint8_t c;
bool_t escapeFlag;
bool_t separatorFound;
const char_t *p;
//Check parameters
if(pos == NULL || param == NULL)
return ERROR_INVALID_PARAMETER;
//Initialize structure
param->name = NULL;
param->nameLen = 0;
param->value = NULL;
param->valueLen = 0;
//Initialize variables
escapeFlag = FALSE;
separatorFound = FALSE;
//Initialize status code
error = ERROR_IN_PROGRESS;
//Point to the first character
i = 0;
p = *pos;
//Loop through the list of parameters
while(error == ERROR_IN_PROGRESS)
{
//Get current character
c = (uint8_t) p[i];
//Check current state
if(param->name == NULL)
{
//Check current character
if(c == '\0')
{
//The list of parameters is empty
error = ERROR_NOT_FOUND;
}
else if(c == ' ' || c == '\t' || c == ',' || c == ';')
{
//Discard whitespace and separator characters
}
else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128)
{
//Point to the first character of the parameter name
param->name = p + i;
}
else
{
//Invalid character
error = ERROR_INVALID_SYNTAX;
}
}
else if(param->nameLen == 0)
{
//Check current character
if(c == '\0' || c == ',' || c == ';')
{
//Save the length of the parameter name
param->nameLen = p + i - param->name;
//Successful processing
error = NO_ERROR;
}
else if(c == ' ' || c == '\t')
{
//Save the length of the parameter name
param->nameLen = p + i - param->name;
}
else if(c == '=')
{
//The key/value separator has been found
separatorFound = TRUE;
//Save the length of the parameter name
param->nameLen = p + i - param->name;
}
else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128)
{
//Advance data pointer
}
else
{
//Invalid character
error = ERROR_INVALID_SYNTAX;
}
}
else if(!separatorFound)
{
//Check current character
if(c == '\0' || c == ',' || c == ';')
{
//Successful processing
error = NO_ERROR;
}
else if(c == ' ' || c == '\t')
{
//Discard whitespace characters
}
else if(c == '=')
{
//The key/value separator has been found
separatorFound = TRUE;
}
else if(c == '\"')
{
//Point to the first character that follows the parameter name
i = param->name + param->nameLen - p;
//Successful processing
error = NO_ERROR;
}
else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128)
{
//Point to the first character that follows the parameter name
i = param->name + param->nameLen - p;
//Successful processing
error = NO_ERROR;
}
else
{
//Invalid character
error = ERROR_INVALID_SYNTAX;
}
}
else if(param->value == NULL)
{
//Check current character
if(c == '\0' || c == ',' || c == ';')
{
//Successful processing
error = NO_ERROR;
}
else if(c == ' ' || c == '\t')
{
//Discard whitespace characters
}
else if(c == '\"')
{
//A string of text is parsed as a single word if it is quoted
//using double-quote marks (refer to RFC 7230, section 3.2.6)
param->value = p + i;
}
else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128)
{
//Point to the first character of the parameter value
param->value = p + i;
}
else
{
//Invalid character
error = ERROR_INVALID_SYNTAX;
}
}
else
{
//Quoted string?
if(param->value[0] == '\"')
{
//Check current character
if(c == '\0')
{
//The second double quote is missing
error = ERROR_INVALID_SYNTAX;
}
else if(escapeFlag)
{
//Recipients that process the value of a quoted-string must
//handle a quoted-pair as if it were replaced by the octet
//following the backslash
escapeFlag = FALSE;
}
else if(c == '\\')
{
//The backslash octet can be used as a single-octet quoting
//mechanism within quoted-string and comment constructs
escapeFlag = TRUE;
}
else if(c == '\"')
{
//Advance pointer over the double quote
i++;
//Save the length of the parameter value
param->valueLen = p + i - param->value;
//Successful processing
error = NO_ERROR;
}
else if(isprint(c) || c == '\t' || c >= 128)
{
//Advance data pointer
}
else
{
//Invalid character
error = ERROR_INVALID_SYNTAX;
}
}
else
{
//Check current character
if(c == '\0' || c == ' ' || c == '\t' || c == ',' || c == ';')
{
//Save the length of the parameter value
param->valueLen = p + i - param->value;
//Successful processing
error = NO_ERROR;
}
else if(isalnum(c) || osStrchr("!#$%&'*+-.^_`|~", c) || c >= 128)
{
//Advance data pointer
}
else
{
//Invalid character
error = ERROR_INVALID_SYNTAX;
}
}
}
//Point to the next character of the string
if(error == ERROR_IN_PROGRESS)
i++;
}
//Check whether the parameter value is a quoted string
if(param->valueLen >= 2 && param->value[0] == '\"')
{
//Discard the surrounding quotes
param->value++;
param->valueLen -= 2;
}
//Actual position in the list of parameters
*pos = p + i;
//Return status code
return error;
}
|
59
| 0
|
static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
{
struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
struct in6_addr *final_p, final;
struct flowi6 fl6;
int err = -1;
struct dst_entry *dst;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
fl6.daddr = ireq->ir_v6_rmt_addr;
fl6.saddr = ireq->ir_v6_loc_addr;
fl6.flowlabel = 0;
fl6.flowi6_oif = ireq->ir_iif;
fl6.fl6_dport = ireq->ir_rmt_port;
fl6.fl6_sport = htons(ireq->ir_num);
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
rcu_read_lock();
final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
rcu_read_unlock();
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
goto done;
}
skb = dccp_make_response(sk, dst, req);
if (skb != NULL) {
struct dccp_hdr *dh = dccp_hdr(skb);
dh->dccph_checksum = dccp_v6_csum_finish(skb,
&ireq->ir_v6_loc_addr,
&ireq->ir_v6_rmt_addr);
fl6.daddr = ireq->ir_v6_rmt_addr;
rcu_read_lock();
err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
np->tclass);
rcu_read_unlock();
err = net_xmit_eval(err);
}
done:
dst_release(dst);
return err;
}
|
60
| 1
|
ast2obj_arguments(void* _o)
{
arguments_ty o = (arguments_ty)_o;
PyObject *result = NULL, *value = NULL;
if (!o) {
Py_INCREF(Py_None);
return Py_None;
}
result = PyType_GenericNew(arguments_type, NULL, NULL);
if (!result) return NULL;
value = ast2obj_list(o->args, ast2obj_arg);
if (!value) goto failed;
if (_PyObject_SetAttrId(result, &PyId_args, value) == -1)
goto failed;
Py_DECREF(value);
value = ast2obj_arg(o->vararg);
if (!value) goto failed;
if (_PyObject_SetAttrId(result, &PyId_vararg, value) == -1)
goto failed;
Py_DECREF(value);
value = ast2obj_list(o->kwonlyargs, ast2obj_arg);
if (!value) goto failed;
if (_PyObject_SetAttrId(result, &PyId_kwonlyargs, value) == -1)
goto failed;
Py_DECREF(value);
value = ast2obj_list(o->kw_defaults, ast2obj_expr);
if (!value) goto failed;
if (_PyObject_SetAttrId(result, &PyId_kw_defaults, value) == -1)
goto failed;
Py_DECREF(value);
value = ast2obj_arg(o->kwarg);
if (!value) goto failed;
if (_PyObject_SetAttrId(result, &PyId_kwarg, value) == -1)
goto failed;
Py_DECREF(value);
value = ast2obj_list(o->defaults, ast2obj_expr);
if (!value) goto failed;
if (_PyObject_SetAttrId(result, &PyId_defaults, value) == -1)
goto failed;
Py_DECREF(value);
return result;
failed:
Py_XDECREF(value);
Py_XDECREF(result);
return NULL;
}
|
61
| 1
|
update_notification_create(struct update_notification **file)
{
struct update_notification *tmp;
struct deltas_head *list;
int error;
tmp = malloc(sizeof(struct update_notification));
if (tmp == NULL)
return pr_enomem();
list = NULL;
error = deltas_head_create(&list);
if (error) {
free(tmp);
return error;
}
tmp->deltas_list = list;
tmp->uri = NULL;
global_data_init(&tmp->global_data);
doc_data_init(&tmp->snapshot);
*file = tmp;
return 0;
}
|
62
| 1
|
TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node,
OpData* op_data) {
// Parse input tensor boxencodings
const TfLiteTensor* input_box_encodings =
GetInput(context, node, kInputTensorBoxEncodings);
TF_LITE_ENSURE_EQ(context, input_box_encodings->dims->data[0], kBatchSize);
const int num_boxes = input_box_encodings->dims->data[1];
TF_LITE_ENSURE(context, input_box_encodings->dims->data[2] >= kNumCoordBox);
const TfLiteTensor* input_anchors =
GetInput(context, node, kInputTensorAnchors);
// Decode the boxes to get (ymin, xmin, ymax, xmax) based on the anchors
CenterSizeEncoding box_centersize;
CenterSizeEncoding scale_values = op_data->scale_values;
CenterSizeEncoding anchor;
for (int idx = 0; idx < num_boxes; ++idx) {
switch (input_box_encodings->type) {
// Quantized
case kTfLiteUInt8:
DequantizeBoxEncodings(
input_box_encodings, idx,
static_cast<float>(input_box_encodings->params.zero_point),
static_cast<float>(input_box_encodings->params.scale),
input_box_encodings->dims->data[2], &box_centersize);
DequantizeBoxEncodings(
input_anchors, idx,
static_cast<float>(input_anchors->params.zero_point),
static_cast<float>(input_anchors->params.scale), kNumCoordBox,
&anchor);
break;
// Float
case kTfLiteFloat32: {
// Please see DequantizeBoxEncodings function for the support detail.
const int box_encoding_idx = idx * input_box_encodings->dims->data[2];
const float* boxes =
&(GetTensorData<float>(input_box_encodings)[box_encoding_idx]);
box_centersize = *reinterpret_cast<const CenterSizeEncoding*>(boxes);
anchor =
ReInterpretTensor<const CenterSizeEncoding*>(input_anchors)[idx];
break;
}
default:
// Unsupported type.
return kTfLiteError;
}
float ycenter = box_centersize.y / scale_values.y * anchor.h + anchor.y;
float xcenter = box_centersize.x / scale_values.x * anchor.w + anchor.x;
float half_h =
0.5f * static_cast<float>(std::exp(box_centersize.h / scale_values.h)) *
anchor.h;
float half_w =
0.5f * static_cast<float>(std::exp(box_centersize.w / scale_values.w)) *
anchor.w;
TfLiteTensor* decoded_boxes =
&context->tensors[op_data->decoded_boxes_index];
auto& box = ReInterpretTensor<BoxCornerEncoding*>(decoded_boxes)[idx];
box.ymin = ycenter - half_h;
box.xmin = xcenter - half_w;
box.ymax = ycenter + half_h;
box.xmax = xcenter + half_w;
}
return kTfLiteOk;
}
|
63
| 1
|
void gdImageWBMPCtx (gdImagePtr image, int fg, gdIOCtx * out)
{
int x, y, pos;
Wbmp *wbmp;
/* create the WBMP */
if ((wbmp = createwbmp (gdImageSX (image), gdImageSY (image), WBMP_WHITE)) == NULL) {
gd_error("Could not create WBMP");
return;
}
/* fill up the WBMP structure */
pos = 0;
for (y = 0; y < gdImageSY(image); y++) {
for (x = 0; x < gdImageSX(image); x++) {
if (gdImageGetPixel (image, x, y) == fg) {
wbmp->bitmap[pos] = WBMP_BLACK;
}
pos++;
}
}
/* write the WBMP to a gd file descriptor */
if (writewbmp (wbmp, &gd_putout, out)) {
gd_error("Could not save WBMP");
}
/* des submitted this bugfix: gdFree the memory. */
freewbmp(wbmp);
}
|
@inproceedings{bhandari2021:cvefixes,
title = {{CVEfixes: Automated Collection of Vulnerabilities and Their Fixes from Open-Source Software}},
booktitle = {{Proceedings of the 17th International Conference on Predictive Models and Data Analytics in Software Engineering (PROMISE '21)}},
author = {Bhandari, Guru and Naseer, Amara and Moonen, Leon},
year = {2021},
pages = {10},
publisher = {{ACM}},
doi = {10.1145/3475960.3475985},
copyright = {Open Access},
isbn = {978-1-4503-8680-7},
language = {en}
}