Fix TIME command microseconds overflow under 32-bits (#11368)
The old `server.unixtime*1000000` overflows on 32-bit builds, where the multiplication is performed in 32-bit arithmetic. The regression was introduced in #10300 (not yet released).
commit 1cc511d7cb (parent 9b94e93eb8)
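
Why the cast matters: on a 32-bit build `time_t` is typically 32 bits wide, so `server.unixtime*1000000` is multiplied in 32-bit arithmetic and wraps long before the subtraction from `server.ustime` happens. The sketch below is not Redis code; it uses hypothetical stand-in variables (`ustime`, `unixtime`) and assumes a two's-complement platform, simulating the 32-bit truncation explicitly so the demo itself avoids signed-overflow undefined behaviour.

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Hypothetical stand-ins for server.ustime / server.unixtime. */
    int64_t ustime   = 1667290000123456LL; /* microseconds since the epoch */
    int32_t unixtime = 1667290000;         /* seconds since the epoch (32-bit time_t) */

    /* What a 32-bit multiplication produces: compute the product in 64 bits,
     * then truncate to 32 bits to show the wrapped value. */
    int32_t wrapped = (int32_t)((int64_t)unixtime * 1000000);
    printf("wrapped 32-bit product: %d\n", wrapped);
    printf("broken microseconds:    %lld\n", (long long)ustime - wrapped);

    /* The fix: cast before multiplying so the product stays in 64 bits. */
    printf("correct microseconds:   %lld\n",
           (long long)(ustime - (int64_t)unixtime * 1000000));
    return 0;
}
```

Casting one operand to `long long` before the multiplication, as the patch does, keeps the whole product in 64 bits, so the microsecond remainder stays within [0, 1000000).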
```diff
@@ -4373,7 +4373,7 @@ void echoCommand(client *c) {
 void timeCommand(client *c) {
     addReplyArrayLen(c,2);
     addReplyBulkLongLong(c, server.unixtime);
-    addReplyBulkLongLong(c, server.ustime-server.unixtime*1000000);
+    addReplyBulkLongLong(c, server.ustime-((long long)server.unixtime)*1000000);
 }
 
 typedef struct replyFlagNames {
```
```diff
@@ -3,6 +3,13 @@ proc cmdstat {cmd} {
 }
 
 start_server {tags {"introspection"}} {
+    test {The microsecond part of the TIME command will not overflow} {
+        set now [r time]
+        set microseconds [lindex $now 1]
+        assert_morethan $microseconds 0
+        assert_lessthan $microseconds 1000000
+    }
+
     test {TTL, TYPE and EXISTS do not alter the last access time of a key} {
         r set foo bar
         after 3000
```