Running FLUSHALL against the cluster from the command line did not work, but flushing each node individually did succeed. The code is as follows:
import java.util

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.internal.Logging
import redis.clients.jedis.{HostAndPort, JedisCluster}

// logInfo / logError come from Spark's Logging trait mixed into this object.
object RedisClusterFlush extends Logging {

  // Build a JedisCluster client from the seven local cluster nodes (ports 7000-7006).
  def getJedisCluster(): JedisCluster = {
    val nodes = new util.HashSet[HostAndPort]()
    nodes.add(new HostAndPort("127.0.0.1", 7000))
    nodes.add(new HostAndPort("127.0.0.1", 7001))
    nodes.add(new HostAndPort("127.0.0.1", 7002))
    nodes.add(new HostAndPort("127.0.0.1", 7003))
    nodes.add(new HostAndPort("127.0.0.1", 7004))
    nodes.add(new HostAndPort("127.0.0.1", 7005))
    nodes.add(new HostAndPort("127.0.0.1", 7006))
    new JedisCluster(nodes)
  }

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("RedisInitWithSparseFeature")
    val sparkContext = new SparkContext(sparkConf)

    val jedisCluster = getJedisCluster()
    // JedisCluster does not expose FLUSHALL directly, so iterate over the
    // per-node connection pools and flush each node individually
    // (Jedis 2.x/3.x API, where getClusterNodes returns JedisPool instances).
    val poolIterator = jedisCluster.getClusterNodes.values.iterator
    while (poolIterator.hasNext) {
      val pool = poolIterator.next()
      val jedis = pool.getResource
      try {
        jedis.flushAll()
      } catch {
        // Replica nodes may reject FLUSHALL; log the error and continue.
        case ex: Exception => logError("Exception while flushing Redis node", ex)
      } finally {
        jedis.close()
      }
    }
    logInfo("flush success!")

    jedisCluster.close()
    sparkContext.stop()
  }
}
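As a usage note, the same per-node iteration can be used to sanity-check the result. The sketch below is my addition rather than part of the original job: the helper name reportKeyCounts is mine, and it assumes the getJedisCluster() helper above, the same Jedis 2.x/3.x API, and a place inside the same Logging object. It simply logs each node's DBSIZE after the flush:

  // Sketch: verify the flush by asking every node for its key count.
  def reportKeyCounts(): Unit = {
    val jedisCluster = getJedisCluster()
    try {
      val entryIterator = jedisCluster.getClusterNodes.entrySet().iterator
      while (entryIterator.hasNext) {
        val entry = entryIterator.next()
        val jedis = entry.getValue.getResource
        try {
          // DBSIZE reports the number of keys held by this individual node.
          logInfo(s"${entry.getKey} holds ${jedis.dbSize()} keys")
        } finally {
          jedis.close()
        }
      }
    } finally {
      jedisCluster.close()
    }
  }

If every node reports 0 keys (replicas mirror their masters), the cluster-wide flush took effect.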
Reference:
https://programtalk.com/java-api-usage-examples/redis.clients.jedis.JedisCluster/