Warning fixes.

Authored by shenweip, 2020-09-16 14:05:06 +08:00; committed by Henrik Rydgård
parent 291f17e279
commit a66cb1237d
5 changed files with 21 additions and 21 deletions
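
Most of the hunks below add explicit casts to silence implicit-narrowing warnings (MSVC C4267/C4244, size_t to uint32/int); a few replace non-ASCII characters in comments that trip codepage warnings. A minimal sketch of the narrowing pattern being fixed (illustrative code, not from the commit; snappy's uint32 is a typedef for uint32_t):

    #include <cstdint>
    #include <cstddef>

    void example(std::size_t n) {
        uint32_t a = n;             // warns: conversion from 'size_t' to 'uint32_t', possible loss of data
        uint32_t b = (uint32_t)n;   // explicit cast documents the intended truncation; no warning
        (void)a; (void)b;
    }

The casts do not change behavior for values that already fit in 32 bits; they only make the narrowing explicit so the compiler stops flagging it.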

@@ -1715,7 +1715,7 @@ void FramebufferManagerCommon::SetRenderSize(VirtualFramebuffer *vfb) {
force1x = vfb->bufferWidth <= 256 || vfb->bufferHeight <= 128;
break;
case 3:
- force1x = vfb->bufferWidth < 480 || vfb->bufferWidth > 800 || vfb->bufferHeight < 272; // GOW uses 864x272
+ force1x = vfb->bufferWidth < 480 || vfb->bufferWidth > 800 || vfb->bufferHeight < 272; // GOW uses 864x272
break;
}

@@ -137,7 +137,7 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
// do an unaligned read and rotate the words around a bit, or do the reads very
// slowly (trip through kernel mode). There's no simple #define that says just
- // “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
+ // "ARMv7 or higher", so we have to filter away all ARMv5 and ARMv6
// sub-architectures.
//
// This is a mess, but there's not much we can do about it.

@@ -384,12 +384,12 @@ static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
if (len_less_than_12 && SNAPPY_PREDICT_TRUE(offset < 2048)) {
// offset fits in 11 bits. The 3 highest go in the top of the first byte,
// and the rest go in the second byte.
- *op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0);
+ *op++ = (char)(COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0));
*op++ = offset & 0xff;
} else {
// Write 4 bytes, though we only care about 3 of them. The output buffer
// is required to have some slack, so the extra byte won't overrun it.
- uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
+ uint32 u = (uint32)(COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8));
LittleEndian::Store32(op, u);
op += 3;
}
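
A worked example of the 1-byte-offset packing above (illustrative values, not part of the diff; COPY_1_BYTE_OFFSET is 1 in snappy's tag enum):

    // offset = 0x123, len = 7
    // byte 0 = 0x01 + ((7 - 4) << 2) + ((0x123 >> 3) & 0xe0) = 0x01 + 0x0c + 0x20 = 0x2d
    // byte 1 = 0x123 & 0xff = 0x23
    // decode: offset = ((0x2d & 0xe0) << 3) | 0x23 = 0x123, len = ((0x2d >> 2) & 0x07) + 4 = 7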
@@ -458,7 +458,7 @@ uint32 CalculateTableSize(uint32 input_size) {
namespace internal {
WorkingMemory::WorkingMemory(size_t input_size) {
const size_t max_fragment_size = std::min(input_size, kBlockSize);
- const size_t table_size = CalculateTableSize(max_fragment_size);
+ const size_t table_size = CalculateTableSize((uint32)max_fragment_size);
size_ = table_size * sizeof(*table_) + max_fragment_size +
MaxCompressedLength(max_fragment_size);
mem_ = std::allocator<char>().allocate(size_);
@@ -473,9 +473,9 @@ WorkingMemory::~WorkingMemory() {
uint16* WorkingMemory::GetHashTable(size_t fragment_size,
int* table_size) const {
- const size_t htsize = CalculateTableSize(fragment_size);
+ const size_t htsize = CalculateTableSize((uint32)fragment_size);
memset(table_, 0, htsize * sizeof(*table_));
- *table_size = htsize;
+ *table_size = (int)htsize;
return table_;
}
} // end namespace internal
@@ -864,7 +864,7 @@ class SnappyDecompressor {
// Long literal.
const size_t literal_length_length = literal_length - 60;
literal_length =
- ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) +
+ ExtractLowBytes(LittleEndian::Load32(ip), (int)literal_length_length) +
1;
ip += literal_length_length;
}
@@ -877,7 +877,7 @@ class SnappyDecompressor {
size_t n;
ip = reader_->Peek(&n);
avail = n;
- peeked_ = avail;
+ peeked_ = (uint32)avail;
if (avail == 0) return; // Premature end of input
ip_limit_ = ip + avail;
}
@@ -889,7 +889,7 @@ class SnappyDecompressor {
} else {
const size_t entry = char_table[c];
const size_t trailer =
- ExtractLowBytes(LittleEndian::Load32(ip), entry >> 11);
+ ExtractLowBytes(LittleEndian::Load32(ip), (int)entry >> 11);
const size_t length = entry & 0xff;
ip += entry >> 11;
@@ -915,7 +915,7 @@ bool SnappyDecompressor::RefillTag() {
reader_->Skip(peeked_); // All peeked bytes are used up
size_t n;
ip = reader_->Peek(&n);
- peeked_ = n;
+ peeked_ = (uint32)n;
eof_ = (n == 0);
if (eof_) return false;
ip_limit_ = ip + n;
@@ -942,7 +942,7 @@ bool SnappyDecompressor::RefillTag() {
size_t length;
const char* src = reader_->Peek(&length);
if (length == 0) return false;
- uint32 to_add = std::min<uint32>(needed - nbuf, length);
+ uint32 to_add = std::min<uint32>(needed - nbuf, (uint32)length);
memcpy(scratch_ + nbuf, src, to_add);
nbuf += to_add;
reader_->Skip(to_add);
@@ -972,7 +972,7 @@ static bool InternalUncompress(Source* r, Writer* writer) {
uint32 uncompressed_len = 0;
if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
- return InternalUncompressAllTags(&decompressor, writer, r->Available(),
+ return InternalUncompressAllTags(&decompressor, writer, (uint32)r->Available(),
uncompressed_len);
}
@@ -1001,7 +1001,7 @@ size_t Compress(Source* reader, Sink* writer) {
size_t N = reader->Available();
const size_t uncompressed_size = N;
char ulength[Varint::kMax32];
- char* p = Varint::Encode32(ulength, N);
+ char* p = Varint::Encode32(ulength, (uint32)N);
writer->Append(ulength, p-ulength);
written += (p - ulength);
@@ -1043,7 +1043,7 @@ size_t Compress(Source* reader, Sink* writer) {
uint16* table = wmem.GetHashTable(num_to_read, &table_size);
// Compress input_fragment and append to dest
- const int max_output = MaxCompressedLength(num_to_read);
+ const int max_output = (int)MaxCompressedLength(num_to_read);
// Need a scratch buffer for the output, in case the byte sink doesn't
// have room for us directly.
@@ -1507,7 +1507,7 @@ class SnappyScatteredWriter {
char* const op_end = op_ptr_ + len;
// See SnappyArrayWriter::AppendFromSelf for an explanation of
// the "offset - 1u" trick.
- if (SNAPPY_PREDICT_TRUE(offset - 1u < op_ptr_ - op_base_ &&
+ if (SNAPPY_PREDICT_TRUE(offset - 1u < (size_t)(op_ptr_ - op_base_) &&
op_end <= op_limit_)) {
// Fast path: src and dst in current block.
op_ptr_ = IncrementalCopy(op_ptr_ - offset, op_ptr_, op_end, op_limit_);
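
For reference, the "offset - 1u" trick that comment points to: offset is unsigned, so an (invalid) offset of zero wraps around to the maximum unsigned value, letting a single comparison reject both offset == 0 and offsets past the bytes produced so far (a sketch of the cases, not code from the diff):

    // produced = op_ptr_ - op_base_ (bytes already written to this block)
    // offset == 0             -> 0 - 1u wraps to UINT_MAX -> test fails -> slow path
    // offset >  produced      -> test fails               -> slow path
    // 1 <= offset <= produced -> offset - 1u < produced   -> fast path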
@@ -1540,7 +1540,7 @@ bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
// Make new block
size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
- op_base_ = allocator_.Allocate(bsize);
+ op_base_ = allocator_.Allocate((int)bsize);
op_ptr_ = op_base_;
op_limit_ = op_base_ + bsize;
blocks_.push_back(op_base_);
@@ -1647,13 +1647,13 @@ bool Uncompress(Source* compressed, Sink* uncompressed) {
if (allocated_size >= uncompressed_len) {
SnappyArrayWriter writer(buf);
bool result = InternalUncompressAllTags(&decompressor, &writer,
- compressed_len, uncompressed_len);
+ (uint32)compressed_len, uncompressed_len);
uncompressed->Append(buf, writer.Produced());
return result;
} else {
SnappySinkAllocator allocator(uncompressed);
SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
- return InternalUncompressAllTags(&decompressor, &writer, compressed_len,
+ return InternalUncompressAllTags(&decompressor, &writer, (uint32)compressed_len,
uncompressed_len);
}
}

@@ -444,7 +444,7 @@ void blendPixel(const Kernel_3x3& ker,
return true;
//make sure there is no second blending in an adjacent rotation for this pixel: handles insular pixels, mario eyes
- if (getTopR(blend) != BLEND_NONE && !eq(e, g)) //but support double-blending for 90° corners
+ if (getTopR(blend) != BLEND_NONE && !eq(e, g)) //but support double-blending for 90?corners
return false;
if (getBottomL(blend) != BLEND_NONE && !eq(e, c))
return false;

@@ -3115,7 +3115,7 @@ XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
int i;
for (i=0; i < nbRounds; ++i) {
/* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*',
- * this will warn "discards ‘const’ qualifier". */
+ * this will warn "discards 'const' qualifier". */
union {
XXH_ALIGN(64) const __m512i* cp;
XXH_ALIGN(64) void* p;
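
The union that follows (cut off here) is xxHash's workaround for that signature: storing the pointer through the const-qualified member and reading it back through the void* member strips the const qualifier without a cast that would trigger the warning. A minimal sketch of the same trick (identifiers are illustrative, not xxHash's; requires AVX-512F and a 64-byte-aligned source):

    #include <immintrin.h>

    __m512i load_stream(const __m512i* src) {
        union { const __m512i* cp; void* p; } u;
        u.cp = src;                               // store the const-qualified pointer
        return _mm512_stream_load_si512(u.p);     // load via the non-const member
    }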