sds genRedisInfoString(char *section) {
sds info = sdsempty();
time_t uptime = server.unixtime-server.stat_starttime;
int j;
struct rusage self_ru, c_ru;
int allsections = 0, defsections = 0;
int sections = 0;
if (section == NULL) section = "default";
allsections = strcasecmp(section,"all") == 0;
defsections = strcasecmp(section,"default") == 0;
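/* Illustrative note: "INFO memory" leaves allsections and defsections at 0 and
 * only the strcasecmp(section,"memory") test below matches, while a bare
 * "INFO" (or "INFO default") emits the whole default set of sections. */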
getrusage(RUSAGE_SELF, &self_ru);
getrusage(RUSAGE_CHILDREN, &c_ru);
/* Server */
if (allsections || defsections || !strcasecmp(section,"server")) {
static int call_uname = 1;
static struct utsname name;
char *mode;
if (server.cluster_enabled) mode = "cluster";
else if (server.sentinel_mode) mode = "sentinel";
else mode = "standalone";
if (sections++) info = sdscat(info,"\r\n");
if (call_uname) {
/* Uname can be slow and is always the same output. Cache it. */
uname(&name);
call_uname = 0;
}
unsigned int lruclock;
atomicGet(server.lruclock,lruclock);
info = sdscatprintf(info,
"# Server\r\n"
"redis_version:%s\r\n" //redis版本
"redis_git_sha1:%s\r\n" //git sha1
"redis_git_dirty:%d\r\n" //git dirty flag
"redis_build_id:%llx\r\n" //redis build id
"redis_mode:%s\r\n" //运行模式,单机或集群
"os:%s %s %s\r\n" //redis服务器的宿主操作系统
"arch_bits:%d\r\n" //架构(32位或64位)
"multiplexing_api:%s\r\n" //所使用的事件处理机制,epoll
"atomicvar_api:%s\r\n" //new,所使用的GNU Compiler Collection,原子处理API
"gcc_version:%d.%d.%d\r\n" //编译时使用的gcc版本
"process_id:%ld\r\n"//redis服务进程pid
"run_id:%s\r\n" //redis服务器的随机标识符(用于sentinel和集群)
"tcp_port:%d\r\n" //服务器监听端口
"uptime_in_seconds:%jd\r\n" //服务器启动总时间,s
"uptime_in_days:%jd\r\n" //服务启动总时间,单位天
"hz:%d\r\n" //redis内部调度(进行关闭timeout的客户端,删除过期key等等)频率,程序规定serverCron每秒运行10次,实际;根据dynamic-hz的可选值为yes和no,5.0.8新增
"configured_hz:%d\r\n" //new,已设置的hz,基准
"lru_clock:%ld\r\n" //自增的时钟,用于LRU管理,该时钟100ms(hz=10,因此每1000ms/10=100ms执行一次定时任务)更新一次。
"executable:%s\r\n" //执行文件的位置
"config_file:%s\r\n", //配置文件的路径
REDIS_VERSION,
redisGitSHA1(),
strtol(redisGitDirty(),NULL,10) > 0,
(unsigned long long) redisBuildId(),
mode,
name.sysname, name.release, name.machine,
server.arch_bits,
aeGetApiName(),
REDIS_ATOMIC_API,
#ifdef __GNUC__
__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__,
#else
0,0,0,
#endif
(long) getpid(),
server.runid,
server.port,
(intmax_t)uptime,
(intmax_t)(uptime/(3600*24)),
server.hz,
server.config_hz,
(unsigned long) lruclock,
server.executable ? server.executable : "",
server.configfile ? server.configfile : "");
}
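/* Worked example (hypothetical value, not from the source): with
 * uptime = 200000 seconds, uptime_in_days = 200000/(3600*24) = 2, i.e. the
 * integer division reports whole days only. */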
/* Clients */
if (allsections || defsections || !strcasecmp(section,"clients")) {
size_t maxin, maxout;
getExpansiveClientsInfo(&maxin,&maxout);
if (sections++) info = sdscat(info,"\r\n");
info = sdscatprintf(info,
"# Clients\r\n"
"connected_clients:%lu\r\n" //已连接的客户端数目(不包括slave)
"client_recent_max_input_buffer:%zu\r\n" //new,当前连接的客户端当中,最大输入缓存,与3.2.8求法不同
"client_recent_max_output_buffer:%zu\r\n" //当前连接的客户端当中,最长的输出列表
"blocked_clients:%d\r\n", //正在等待阻塞命令(BLPOP、BRPOP、BRPOPLPUSH)的客户端的数量
listLength(server.clients)-listLength(server.slaves),
maxin, maxout,
server.blocked_clients);
}
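/* Note (assumption about the surrounding 5.0 code): the "recent max" buffer
 * sizes above are maintained by periodic sampling from clientsCron() and read
 * here via getExpansiveClientsInfo(), rather than by scanning every client on
 * each INFO call as older versions did - hence the "recent" in the names. */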
/* Memory */
if (allsections || defsections || !strcasecmp(section,"memory")) {
char hmem[64];
char peak_hmem[64];
char total_system_hmem[64];
char used_memory_lua_hmem[64];
char used_memory_scripts_hmem[64];
char used_memory_rss_hmem[64];
char maxmemory_hmem[64];
size_t zmalloc_used = zmalloc_used_memory();
size_t total_system_mem = server.system_memory_size;
const char *evict_policy = evictPolicyToString();
long long memory_lua = (long long)lua_gc(server.lua,LUA_GCCOUNT,0)*1024;
struct redisMemOverhead *mh = getMemoryOverheadData();
/* Peak memory is updated from time to time by serverCron() so it
* may happen that the instantaneous value is slightly bigger than
* the peak value. This may confuse users, so we update the peak
* if found smaller than the current memory usage. */
if (zmalloc_used > server.stat_peak_memory)
server.stat_peak_memory = zmalloc_used;
bytesToHuman(hmem,zmalloc_used);
bytesToHuman(peak_hmem,server.stat_peak_memory);
bytesToHuman(total_system_hmem,total_system_mem);
bytesToHuman(used_memory_lua_hmem,memory_lua);
bytesToHuman(used_memory_scripts_hmem,mh->lua_caches);
bytesToHuman(used_memory_rss_hmem,server.cron_malloc_stats.process_rss);
bytesToHuman(maxmemory_hmem,server.maxmemory);
if (sections++) info = sdscat(info,"\r\n");
info = sdscatprintf(info,
"# Memory\r\n"
"used_memory:%zu\r\n" //reids内存分配器分配的内存总量,字节为单位,实际使用的,包括了redis进程的内部开销和数据占用的字节
"used_memory_human:%s\r\n" //分配的内存总量,人类可读
"used_memory_rss:%zu\r\n" //操作系统角度,redis已分配的内存总量(俗称常驻集大小),和top,ps等命令输出一致,Redis申请的,一般在key释放后,不会释放占用的空间,避免频繁申请损耗性能
"used_memory_rss_human:%s\r\n" //人类可读,常驻集
"used_memory_peak:%zu\r\n" //reids内存消耗峰值
"used_memory_peak_human:%s\r\n" //人类可读,内存消耗峰值
"used_memory_peak_perc:%.2f%%\r\n" //new,峰值内存超出分配内存(used_memory)的百分比,(used_memory/ used_memory_peak) *100%
"used_memory_overhead:%zu\r\n" //new,服务器为管理其内部数据结构而分配的所有开销的字节总和,包括所有客户端输出缓冲区、查询缓冲区、AOF重写缓冲区和主从复制的backlog
"used_memory_startup:%zu\r\n" //new,Redis在启动时消耗的初始内存量(以字节为单位)
"used_memory_dataset:%zu\r\n" //new,数据占用内存的大小,即used_memory-used_memory_overhead
"used_memory_dataset_perc:%.2f%%\r\n" //new,数据占用内存的大小百分比,即(used_memory_dataset/(used_memory-used_memory_startup)) *100%
"allocator_allocated:%zu\r\n" //new,分配器分配的内存
"allocator_active:%zu\r\n" //new,分配器活跃的内存
"allocator_resident:%zu\r\n" //new,分配器常驻的内存
"total_system_memory:%lu\r\n" //系统内存总量
"total_system_memory_human:%s\r\n" //人类可读,系统内存总量
"used_memory_lua:%lld\r\n" //lua引擎占用的内存
"used_memory_lua_human:%s\r\n" //人类可读,lua引擎占用的内存
"used_memory_scripts:%lld\r\n" //new,lua脚本占用的内存
"used_memory_scripts_human:%s\r\n" //new,人类可读,脚本占用的内存
"number_of_cached_scripts:%lu\r\n" //new,缓存脚本数
"maxmemory:%lld\r\n" //配置设置可使用最大内存值,Redis当成数据库使用,可不设置
"maxmemory_human:%s\r\n" //人类可读,设置可使用的最大内存值
"maxmemory_policy:%s\r\n" //达到最大内存值键的淘汰策略
"allocator_frag_ratio:%.2f\r\n" //new,分配器的碎片率
"allocator_frag_bytes:%zu\r\n" //new,分配器的碎片大小
"allocator_rss_ratio:%.2f\r\n" //new,分配器常驻内存比例
"allocator_rss_bytes:%zd\r\n" //new,分配器常驻内存大小
"rss_overhead_ratio:%.2f\r\n" //new,常驻内存开销比例
"rss_overhead_bytes:%zd\r\n" //new,常驻内存开销大小
"mem_fragmentation_ratio:%.2f\r\n" //碎片率,used_memory_rss和used_memory之间的比率
"mem_fragmentation_bytes:%zd\r\n" //new,内存碎片的大小
"mem_not_counted_for_evict:%zu\r\n" //new,在达到内存上限后不应被清理的内存大小,aof和slave buffer占用的内存
"mem_replication_backlog:%zu\r\n" //new,复制积压缓冲区占用内存大小,repl_backlog
"mem_clients_slaves:%zu\r\n" //new,所有从实例占用的内存,包括输入输出缓冲区,client结构体
"mem_clients_normal:%zu\r\n" //new,所有client占用的内存,包括同上
"mem_aof_buffer:%zu\r\n" //new,aof时占用的缓冲
"mem_allocator:%s\r\n" //内存分配器
"active_defrag_running:%d\r\n" //new,是否有正在进行的自动内存碎片整理,0表示没有活动的defrag任务正在运行,1表示有活动的defrag任务正在运行(defrag:表示内存碎片整理)
"lazyfree_pending_objects:%zu\r\n", //0表示不存在延迟释放(也有资料翻译未惰性删除)的挂起对象
zmalloc_used,
hmem,
server.cron_malloc_stats.process_rss,
used_memory_rss_hmem,
server.stat_peak_memory,
peak_hmem,
mh->peak_perc,
mh->overhead_total,
mh->startup_allocated,
mh->dataset,
mh->dataset_perc,
server.cron_malloc_stats.allocator_allocated,
server.cron_malloc_stats.allocator_active,
server.cron_malloc_stats.allocator_resident,
(unsigned long)total_system_mem,
total_system_hmem,
memory_lua,
used_memory_lua_hmem,
(long long) mh->lua_caches,
used_memory_scripts_hmem,
dictSize(server.lua_scripts),
server.maxmemory,
maxmemory_hmem,
evict_policy,
mh->allocator_frag,
mh->allocator_frag_bytes,
mh->allocator_rss,
mh->allocator_rss_bytes,
mh->rss_extra,
mh->rss_extra_bytes,
mh->total_frag, /* this is the total RSS overhead, including fragmentation, */
mh->total_frag_bytes, /* named so for backwards compatibility */
freeMemoryGetNotCountedMemory(),
mh->repl_backlog,
mh->clients_slaves,
mh->clients_normal,
mh->aof_buffer,
ZMALLOC_LIB,
server.active_defrag_running,
lazyfreeGetPendingObjectsCount()
);
freeMemoryOverheadData(mh);
}
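/* Worked example (hypothetical numbers): if used_memory is 100 MB and
 * used_memory_rss is 130 MB, mem_fragmentation_ratio = 130/100 = 1.30 and
 * mem_fragmentation_bytes = 30 MB; a ratio well above 1 points at
 * allocator/RSS overhead, while a ratio below 1 usually indicates swapping. */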
/* Persistence */
if (allsections || defsections || !strcasecmp(section,"persistence")) {
if (sections++) info = sdscat(info,"\r\n");
info = sdscatprintf(info,
"# Persistence\r\n"
"loading:%d\r\n" //服务器是否在载入持久化文件
"rdb_changes_since_last_save:%lld\r\n" //离最近一次成功生成rdb文件,写入命令的个数,即有多少个写入命令没有持久化
"rdb_bgsave_in_progress:%d\r\n" //服务器是否正在创建rdb文件
"rdb_last_save_time:%jd\r\n" //离最近一次成功创建rdb文件的时间戳。当前时间戳 - rdb_last_save_time=多少秒未成功生成rdb文件
"rdb_last_bgsave_status:%s\r\n" //最近一次rdb持久化是否成功
"rdb_last_bgsave_time_sec:%jd\r\n" //最近一次成功生成rdb文件耗时秒数
"rdb_current_bgsave_time_sec:%jd\r\n" //如果服务器正在创建rdb文件,那么这个域记录的就是当前的创建操作已经耗费的秒数
"rdb_last_cow_size:%zu\r\n" //new,上一次RBD保存操作期间写时复制的大小(以字节为单位),最近一次执行rdb占用的内存
"aof_enabled:%d\r\n" //是否开启了aof
"aof_rewrite_in_progress:%d\r\n" //标识aof的rewrite操作是否在进行中
"aof_rewrite_scheduled:%d\r\n" //rewrite任务计划,当客户端发送bgrewriteaof指令,如果当前rewrite子进程正在执行,那么将客户端请求的 bgrewriteaof变为计划任务,待aof子进程结束后执行rewrite
"aof_last_rewrite_time_sec:%jd\r\n" //最近一次aof rewrite耗费的时长
"aof_current_rewrite_time_sec:%jd\r\n" //如果rewrite操作正在进行,则记录所使用的时间,单位秒
"aof_last_bgrewrite_status:%s\r\n" //上次bgrewriteaof操作的状态
"aof_last_write_status:%s\r\n" //上次aof写入状态
"aof_last_cow_size:%zu\r\n", //new,AOF过程中父进程与子进程相比执行了多少修改(包括读缓冲区,写缓冲区,数据修改等),最近一次执行aof占用的内存
server.loading,
server.dirty,
server.rdb_child_pid != -1,
(intmax_t)server.lastsave,
(server.lastbgsave_status == C_OK) ? "ok" : "err",
(intmax_t)server.rdb_save_time_last,
(intmax_t)((server.rdb_child_pid == -1) ?
-1 : time(NULL)-server.rdb_save_time_start),
server.stat_rdb_cow_bytes,
server.aof_state != AOF_OFF,
server.aof_child_pid != -1,
server.aof_rewrite_scheduled,
(intmax_t)server.aof_rewrite_time_last,
(intmax_t)((server.aof_child_pid == -1) ?
-1 : time(NULL)-server.aof_rewrite_time_start),
(server.aof_lastbgrewrite_status == C_OK) ? "ok" : "err",
(server.aof_last_write_status == C_OK) ? "ok" : "err",
server.stat_aof_cow_bytes);
if (server.aof_state != AOF_OFF) {
info = sdscatprintf(info,
"aof_current_size:%lld\r\n" //aof当前尺寸
"aof_base_size:%lld\r\n" //服务器启动时或者aof重写最近一次执行之后aof文件的大小
"aof_pending_rewrite:%d\r\n" //是否有aof重写操作在等待rdb文件创建完毕之后执行
"aof_buffer_length:%zu\r\n" //aof buffer的大小
"aof_rewrite_buffer_length:%lu\r\n" //aof rewrite buffer的大小
"aof_pending_bio_fsync:%llu\r\n" //后台I/O队列里面,等待执行的fsync调用数量
"aof_delayed_fsync:%lu\r\n", //被延迟的fsync调用数量
(long long) server.aof_current_size,
(long long) server.aof_rewrite_base_size,
server.aof_rewrite_scheduled,
sdslen(server.aof_buf),
aofRewriteBufferSize(),
bioPendingJobsOfType(BIO_AOF_FSYNC),
server.aof_delayed_fsync);
}
if (server.loading) {
double perc;
time_t eta, elapsed;
off_t remaining_bytes = server.loading_total_bytes-
server.loading_loaded_bytes;
perc = ((double)server.loading_loaded_bytes /
(server.loading_total_bytes+1)) * 100;
elapsed = time(NULL)-server.loading_start_time;
if (elapsed == 0) {
eta = 1; /* A fake 1 second figure if we don't have
enough info */
} else {
eta = (elapsed*remaining_bytes)/(server.loading_loaded_bytes+1);
}
info = sdscatprintf(info,
"loading_start_time:%jd\r\n" //开始进行载入的时间
"loading_total_bytes:%llu\r\n" //正在载入的数据的大小
"loading_loaded_bytes:%llu\r\n" //已载入数据的大小
"loading_loaded_perc:%.2f\r\n" //已载入数据的百分比
"loading_eta_seconds:%jd\r\n", //载入剩余时间
(intmax_t) server.loading_start_time,
(unsigned long long) server.loading_total_bytes,
(unsigned long long) server.loading_loaded_bytes,
perc,
(intmax_t)eta
);
}
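/* Worked example for the ETA above (hypothetical numbers): with 40 MB loaded
 * out of 100 MB after elapsed = 20 s, eta = (20 * 60MB) / (40MB + 1) ~= 30 s,
 * i.e. the remaining time is extrapolated from the observed load rate. */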
}
/* Stats */
if (allsections || defsections || !strcasecmp(section,"stats")) {
if (sections++) info = sdscat(info,"\r\n");
info = sdscatprintf(info,
"# Stats\r\n"
"total_connections_received:%lld\r\n" //新创建连接个数,如果新创建连接过多,过度地创建和销毁连接对性能有影响,说明短连接严重或连接池使用有问题,需调研代码的连接设置
"total_commands_processed:%lld\r\n" //redis处理的命令数
"instantaneous_ops_per_sec:%lld\r\n" //redis当前的qps,redis内部较实时的每秒执行的命令数
"total_net_input_bytes:%lld\r\n" //redis网络入口流量字节数
"total_net_output_bytes:%lld\r\n" //redis网络出口流量字节数
"instantaneous_input_kbps:%.2f\r\n" //redis网络入口kps
"instantaneous_output_kbps:%.2f\r\n" //redis网络出口kps
"rejected_connections:%lld\r\n" //拒绝的连接个数,redis连接个数达到maxclients限制,拒绝新连接的个数
"sync_full:%lld\r\n" //主从完全同步成功次数
"sync_partial_ok:%lld\r\n" //主从部分同步成功次数
"sync_partial_err:%lld\r\n" //主从部分同步失败次数
"expired_keys:%lld\r\n" //运行以来过期的key的数量
"expired_stale_perc:%.2f\r\n" //new,抽样中可能过期的百分比
"expired_time_cap_reached_count:%lld\r\n" //new,清除过期键到达执行时间限制的次数,进行了多少次定期删除
"evicted_keys:%lld\r\n" //运行以来剔除(超过了maxmemory后)的key的数量
"keyspace_hits:%lld\r\n" //命中次数
"keyspace_misses:%lld\r\n" //没命中次数
"pubsub_channels:%ld\r\n" //当前使用中的频道数量
"pubsub_patterns:%lu\r\n" //当前使用的模式的数量
"latest_fork_usec:%lld\r\n" //最近一次fork操作阻塞redis进程的耗时数,单位微秒
"migrate_cached_sockets:%ld\r\n" //是否已经缓存了到该地址的连接
"slave_expires_tracked_keys:%zu\r\n" //new,从实例到期key数量
"active_defrag_hits:%lld\r\n" //new,自动内存碎片整理,功能是实验性的,不建议开启,主动碎片整理命中次数
"active_defrag_misses:%lld\r\n" //new,主动碎片整理未命中次数,进入activeDefragAlloc函数,但不满足当前对象所属run的利用率低于对应bin的平均水平
"active_defrag_key_hits:%lld\r\n" //new,主动碎片整理key命中次数
"active_defrag_key_misses:%lld\r\n", //new,主动碎片整理key未命中次数
server.stat_numconnections,
server.stat_numcommands,
getInstantaneousMetric(STATS_METRIC_COMMAND),
server.stat_net_input_bytes,
server.stat_net_output_bytes,
(float)getInstantaneousMetric(STATS_METRIC_NET_INPUT)/1024,
(float)getInstantaneousMetric(STATS_METRIC_NET_OUTPUT)/1024,
server.stat_rejected_conn,
server.stat_sync_full,
server.stat_sync_partial_ok,
server.stat_sync_partial_err,
server.stat_expiredkeys,
server.stat_expired_stale_perc*100,
server.stat_expired_time_cap_reached_count,
server.stat_evictedkeys,
server.stat_keyspace_hits,
server.stat_keyspace_misses,
dictSize(server.pubsub_channels),
listLength(server.pubsub_patterns),
server.stat_fork_time,
dictSize(server.migrate_cached_sockets),
getSlaveKeyWithExpireCount(),
server.stat_active_defrag_hits,
server.stat_active_defrag_misses,
server.stat_active_defrag_key_hits,
server.stat_active_defrag_key_misses);
}
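/* Derived metric (not printed by INFO, shown for illustration): the keyspace
 * hit rate is keyspace_hits / (keyspace_hits + keyspace_misses); e.g. 900
 * hits and 100 misses give a 90% hit rate. */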
/* Replication */
if (allsections || defsections || !strcasecmp(section,"replication")) {
if (sections++) info = sdscat(info,"\r\n");
info = sdscatprintf(info,
"# Replication\r\n"
"role:%s\r\n", 实例角色,主或从
server.masterhost == NULL ? "master" : "slave");
if (server.masterhost) {
long long slave_repl_offset = 1;
if (server.master)
slave_repl_offset = server.master->reploff;
else if (server.cached_master)
slave_repl_offset = server.cached_master->reploff;
info = sdscatprintf(info,
"master_host:%s\r\n" //此节点对应的master的ip
"master_port:%d\r\n" //此节点对应的master的port
"master_link_status:%s\r\n" //slave端可查看它与master之间同步状态,当复制断开后表示down
"master_last_io_seconds_ago:%d\r\n" //主库多少秒未发送数据到从库
"master_sync_in_progress:%d\r\n" //从服务器是否在与主服务器进行同步
"slave_repl_offset:%lld\r\n" //slave复制偏移量
,server.masterhost,
server.masterport,
(server.repl_state == REPL_STATE_CONNECTED) ?
"up" : "down",
server.master ?
((int)(server.unixtime-server.master->lastinteraction)) : -1,
server.repl_state == REPL_STATE_TRANSFER,
slave_repl_offset
);
if (server.repl_state == REPL_STATE_TRANSFER) {
info = sdscatprintf(info,
"master_sync_left_bytes:%lld\r\n"
"master_sync_last_io_seconds_ago:%d\r\n"
, (long long)
(server.repl_transfer_size - server.repl_transfer_read),
(int)(server.unixtime-server.repl_transfer_lastio)
);
}
if (server.repl_state != REPL_STATE_CONNECTED) {
info = sdscatprintf(info,
"master_link_down_since_seconds:%jd\r\n",
(intmax_t)server.unixtime-server.repl_down_since);
}
info = sdscatprintf(info,
"slave_priority:%d\r\n" //slave优先级
"slave_read_only:%d\r\n", //从库是否设置只读
server.slave_priority,
server.repl_slave_ro);
}
info = sdscatprintf(info,
"connected_slaves:%lu\r\n", //连接的slave实例个数
listLength(server.slaves));
/* If min-slaves-to-write is active, write the number of slaves
* currently considered 'good'. */
if (server.repl_min_slaves_to_write &&
server.repl_min_slaves_max_lag) {
info = sdscatprintf(info,
"min_slaves_good_slaves:%d\r\n",
server.repl_good_slaves_count);
}
if (listLength(server.slaves)) {
int slaveid = 0;
listNode *ln;
listIter li;
listRewind(server.slaves,&li);
while((ln = listNext(&li))) {
client *slave = listNodeValue(ln);
char *state = NULL;
char ip[NET_IP_STR_LEN], *slaveip = slave->slave_ip;
int port;
long lag = 0;
if (slaveip[0] == '\0') {
if (anetPeerToString(slave->fd,ip,sizeof(ip),&port) == -1)
continue;
slaveip = ip;
}
switch(slave->replstate) {
case SLAVE_STATE_WAIT_BGSAVE_START:
case SLAVE_STATE_WAIT_BGSAVE_END:
state = "wait_bgsave";
break;
case SLAVE_STATE_SEND_BULK:
state = "send_bulk";
break;
case SLAVE_STATE_ONLINE:
state = "online";
break;
}
if (state == NULL) continue;
if (slave->replstate == SLAVE_STATE_ONLINE)
lag = time(NULL) - slave->repl_ack_time;
// e.g. slave0:ip=192.168.64.104,port=9021,state=online,offset=6713173004,lag=0  (lag: seconds since the replica last sent REPLCONF ACK to the master)
info = sdscatprintf(info,
"slave%d:ip=%s,port=%d,state=%s,"
"offset=%lld,lag=%ld\r\n",
slaveid,slaveip,slave->slave_listening_port,state,
slave->repl_ack_off, lag);
slaveid++;
}
}
info = sdscatprintf(info,
"master_replid:%s\r\n" //new,从节点,当前正在同步的master复制id;主节点,run_id
"master_replid2:%s\r\n" //new,同步过的前一个master的复制id,用于failover的PSYNC;4.0新特性,是从节点没用,提升为主节点后,其他从节点可以根据这个值进行部分同步
"master_repl_offset:%lld\r\n" //主从同步偏移量,此值如果和上面的offset相同说明主从一致没延迟
"second_repl_offset:%lld\r\n" //new,同步过的上一个master的复制偏移量,从节点提升为主节点后,其他从节点复制偏移量小于该值,可以进行部分同步
"repl_backlog_active:%d\r\n" //复制积压缓冲区是否开启
"repl_backlog_size:%lld\r\n" //复制积压缓冲大小
"repl_backlog_first_byte_offset:%lld\r\n" //复制缓冲区里偏移量的大小
"repl_backlog_histlen:%lld\r\n", //此值等于 master_repl_offset - repl_backlog_first_byte_offset,该值不会超过repl_backlog_size的大小
server.replid,
server.replid2,
server.master_repl_offset,
server.second_replid_offset,
server.repl_backlog != NULL,
server.repl_backlog_size,
server.repl_backlog_off,
server.repl_backlog_histlen);
}
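/* Worked example (hypothetical offsets): with master_repl_offset = 5000 and
 * repl_backlog_first_byte_offset = 4000, repl_backlog_histlen = 1000, i.e.
 * 1000 bytes of history are available for partial resync; histlen is capped
 * at repl_backlog_size. */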
/* CPU */
if (allsections || defsections || !strcasecmp(section,"cpu")) {
if (sections++) info = sdscat(info,"\r\n");
info = sdscatprintf(info,
"# CPU\r\n"
"used_cpu_sys:%ld.%06ld\r\n" //Redis服务消耗的系统cpu
"used_cpu_user:%ld.%06ld\r\n" //Redis服务消耗的用户cpu
"used_cpu_sys_children:%ld.%06ld\r\n" //后台进程占用的系统cpu
"used_cpu_user_children:%ld.%06ld\r\n", //后台进程占用的用户cpu
(long)self_ru.ru_stime.tv_sec, (long)self_ru.ru_stime.tv_usec,
(long)self_ru.ru_utime.tv_sec, (long)self_ru.ru_utime.tv_usec,
(long)c_ru.ru_stime.tv_sec, (long)c_ru.ru_stime.tv_usec,
(long)c_ru.ru_utime.tv_sec, (long)c_ru.ru_utime.tv_usec);
}
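/* Reading example: each value is printed as tv_sec.tv_usec from getrusage(),
 * so "used_cpu_sys:12.500000" (hypothetical) means 12 s + 500000 us = 12.5 s
 * of system CPU consumed. */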
/* Command statistics */
if (allsections || !strcasecmp(section,"commandstats")) {
if (sections++) info = sdscat(info,"\r\n");
info = sdscatprintf(info, "# Commandstats\r\n");
struct redisCommand *c;
dictEntry *de;
dictIterator *di;
di = dictGetSafeIterator(server.commands);
while((de = dictNext(di)) != NULL) {
c = (struct redisCommand *) dictGetVal(de);
if (!c->calls) continue;
info = sdscatprintf(info,
"cmdstat_%s:calls=%lld,usec=%lld,usec_per_call=%.2f\r\n",
c->name, c->calls, c->microseconds,
(c->calls == 0) ? 0 : ((float)c->microseconds/c->calls));
}
dictReleaseIterator(di);
}
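/* Worked example (hypothetical counters): a command with calls=200 and
 * usec=5000 is reported as usec_per_call=25.00; counters accumulate since
 * startup or the last CONFIG RESETSTAT. */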
/* Cluster */
if (allsections || defsections || !strcasecmp(section,"cluster")) {
if (sections++) info = sdscat(info,"\r\n");
info = sdscatprintf(info,
"# Cluster\r\n"
"cluster_enabled:%d\r\n", //是否开启集群模式
server.cluster_enabled);
}
/* Key space */
if (allsections || defsections || !strcasecmp(section,"keyspace")) {
if (sections++) info = sdscat(info,"\r\n");
info = sdscatprintf(info, "# Keyspace\r\n");
for (j = 0; j < server.dbnum; j++) {
long long keys, vkeys;
keys = dictSize(server.db[j].dict);
vkeys = dictSize(server.db[j].expires);
if (keys || vkeys) {
info = sdscatprintf(info,
"db%d:keys=%lld,expires=%lld,avg_ttl=%lld\r\n", //数据库的统计信息
j, keys, vkeys, server.db[j].avg_ttl);
}
}
}
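/* Example line (illustrative values): "db0:keys=100,expires=20,avg_ttl=3600000"
 * means database 0 holds 100 keys, 20 of them with a TTL set, and the average
 * TTL estimate is 3600000 ms. */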
return info;
}

Note: fields annotated "new" above are attributes present in Redis 5.0 that did not exist in 3.0.
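For context, here is a minimal sketch of how genRedisInfoString() is reached: the INFO command handler forwards the optional section argument and returns the resulting sds string as a bulk reply. It mirrors the 5.0-era infoCommand() in server.c, but treat it as an approximation rather than a verbatim copy.

void infoCommand(client *c) {
    char *section = c->argc == 2 ? c->argv[1]->ptr : "default";

    if (c->argc > 2) {
        addReply(c,shared.syntaxerr);
        return;
    }
    /* Build the requested section(s) and hand the sds buffer to the reply layer. */
    addReplyBulkSds(c, genRedisInfoString(section));
}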