mirror of
https://github.com/bsnes-emu/bsnes.git
synced 2025-04-02 10:42:14 -04:00
byuu says: Changelog: - nall: fixed major memory leak in string class - ruby: video shaders support #define-based settings now - phoenix/GTK+: support > 256x256 icons for window / task bar / alt-tab - sfc: remove random/ and config/, merge into system/ - ethos: delete higan.png (48x48), replace with higan512.png (512x512) as new higan.png - ethos: default gamma to 100% (no color adjustment) - ethos: use "Video Shaders/Display Emulation/" instead of "Video Shaders/Emulation/" - use g++ instead of g++-4.7 (g++ -v must be >= 4.7) - use -std=c++11 instead of -std=gnu++11 - applied a few patches from Debian upstream to make their packaging job easier So because colors are normalized in GLSL, I won't be able to offer video shaders absolute color literals. We will have to perform basic color conversion inside the core. As such, the current plan is to create some sort of Emulator::Settings interface. With that, I'll connect an option for color correction, which will be on by default. For FC/SFC, that will mean gamma correction (darker / stronger colors), and for GB/GBC/GBA, it will mean simulating the weird brightness levels of the displays. I am undecided on whether to use pea soup green for the GB or not. By not doing so, it'll be easier for the display emulation shader to do it.
103 lines
2.3 KiB
C++
#ifndef NALL_ATOI_HPP
|
|
#define NALL_ATOI_HPP
|
|
|
|
#include <cstdlib>

#include <nall/stdint.hpp>
|
|
|
|
namespace nall {
|
|
|
|
//accumulate binary digits ('0'/'1') of s into sum; '\'' digit separators are
//skipped; parsing stops at the first other character and returns sum.
//C++11 constexpr requires the single-return recursive form.
constexpr inline uintmax_t binary_(const char* s, uintmax_t sum = 0) {
  return (
    //parenthesize the digit term: without parens this relied on '-' binding
    //tighter than '|', which is correct but triggers -Wparentheses
    *s == '0' || *s == '1' ? binary_(s + 1, (sum << 1) | (*s - '0')) :
    *s == '\'' ? binary_(s + 1, sum) :
    sum
  );
}
|
|
|
|
//accumulate octal digits ('0'-'7') of s into sum; '\'' digit separators are
//skipped; parsing stops at the first other character and returns sum.
//C++11 constexpr requires the single-return recursive form.
constexpr inline uintmax_t octal_(const char* s, uintmax_t sum = 0) {
  return (
    //explicit parens on the digit term (avoids the -Wparentheses '|' vs '-' ambiguity)
    *s >= '0' && *s <= '7' ? octal_(s + 1, (sum << 3) | (*s - '0')) :
    *s == '\'' ? octal_(s + 1, sum) :
    sum
  );
}
|
|
|
|
//accumulate decimal digits ('0'-'9') of s into sum; '\'' digit separators are
//skipped; parsing stops at the first other character and returns sum.
//C++11 constexpr requires the single-return recursive form.
constexpr inline uintmax_t decimal_(const char* s, uintmax_t sum = 0) {
  return (
    //explicit parens on the digit term, matching the binary_/octal_/hex_ style
    *s >= '0' && *s <= '9' ? decimal_(s + 1, (sum * 10) + (*s - '0')) :
    *s == '\'' ? decimal_(s + 1, sum) :
    sum
  );
}
|
|
|
|
//accumulate hexadecimal digits (0-9, a-f, A-F) of s into sum; '\'' digit
//separators are skipped; parsing stops at the first other character and
//returns sum. C++11 constexpr requires the single-return recursive form.
constexpr inline uintmax_t hex_(const char* s, uintmax_t sum = 0) {
  return (
    //explicit parens on each digit term: the unparenthesized forms relied on
    //'-'/'+' binding tighter than '|' (correct, but -Wparentheses bait)
    *s >= 'A' && *s <= 'F' ? hex_(s + 1, (sum << 4) | (*s - 'A' + 10)) :
    *s >= 'a' && *s <= 'f' ? hex_(s + 1, (sum << 4) | (*s - 'a' + 10)) :
    *s >= '0' && *s <= '9' ? hex_(s + 1, (sum << 4) | (*s - '0')) :
    *s == '\'' ? hex_(s + 1, sum) :
    sum
  );
}
|
|
|
|
//
|
|
|
|
constexpr inline uintmax_t binary(const char* s) {
|
|
return (
|
|
*s == '0' && *(s + 1) == 'B' ? binary_(s + 2) :
|
|
*s == '0' && *(s + 1) == 'b' ? binary_(s + 2) :
|
|
*s == '%' ? binary_(s + 1) :
|
|
binary_(s)
|
|
);
|
|
}
|
|
|
|
constexpr inline uintmax_t octal(const char* s) {
|
|
return (
|
|
*s == '0' && *(s + 1) == 'O' ? octal_(s + 2) :
|
|
*s == '0' && *(s + 1) == 'o' ? octal_(s + 2) :
|
|
octal_(s)
|
|
);
|
|
}
|
|
|
|
constexpr inline intmax_t integer(const char* s) {
|
|
return (
|
|
*s == '+' ? +decimal_(s + 1) :
|
|
*s == '-' ? -decimal_(s + 1) :
|
|
decimal_(s)
|
|
);
|
|
}
|
|
|
|
constexpr inline uintmax_t decimal(const char* s) {
|
|
return (
|
|
decimal_(s)
|
|
);
|
|
}
|
|
|
|
constexpr inline uintmax_t hex(const char* s) {
|
|
return (
|
|
*s == '0' && *(s + 1) == 'X' ? hex_(s + 2) :
|
|
*s == '0' && *(s + 1) == 'x' ? hex_(s + 2) :
|
|
*s == '$' ? hex_(s + 1) :
|
|
hex_(s)
|
|
);
|
|
}
|
|
|
|
constexpr inline intmax_t numeral(const char* s) {
|
|
return (
|
|
*s == '0' && *(s + 1) == 'X' ? hex_(s + 2) :
|
|
*s == '0' && *(s + 1) == 'x' ? hex_(s + 2) :
|
|
*s == '0' && *(s + 1) == 'B' ? binary_(s + 2) :
|
|
*s == '0' && *(s + 1) == 'b' ? binary_(s + 2) :
|
|
*s == '0' ? octal_(s + 1) :
|
|
*s == '+' ? +decimal_(s + 1) :
|
|
*s == '-' ? -decimal_(s + 1) :
|
|
decimal_(s)
|
|
);
|
|
}
|
|
|
|
//parse a floating-point literal via std::atof (stops at the first invalid
//character; returns 0.0 when no conversion is possible). Qualified as
//std::atof and backed by an explicit <cstdlib> include — the unqualified
//call previously compiled only via a transitive include.
inline double real(const char* s) {
  return std::atof(s);
}
|
|
|
|
}
|
|
|
|
#endif
|