Developing an LRU Cache in C++ for the iOS Environment
This article focuses on how to build, in Xcode, a cache written in C++ that runs on iOS. The algorithm is based on LRU (Least Recently Used). For background on LRU, see:
http://en.wikipedia.org/wiki/Page_replacement_algorithm#Least_recently_used
I had previously come across a C++ implementation posted online that I liked, so the core code here follows that design.
The original author uses two map objects: one holds the cached data and the other serves as the LRU queue. Note that the LRU queue is not implemented with the usual linked list; a map stands in for the list, a choice the original author explains.
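The reason a map can stand in for the list is that std::map keeps its keys sorted: if the LRU queue is keyed by a monotonically increasing virtual access time, then begin() always points at the least recently used entry. A minimal conceptual sketch of the idea (simplified types; the article's real implementation appears further down):
// Conceptual sketch only -- not the article's final code.
#include <map>
#include <string>

typedef unsigned long long virtual_time;

// cached data: key -> (value, time of last access)
std::map<std::string, std::pair<std::string, virtual_time> > hash_table;
// LRU queue: time of last access -> key, kept sorted ascending by std::map
std::map<virtual_time, std::string> lru_list;

// On every set/get: erase the key's old time stamp from lru_list and
// re-insert it under a new, larger time stamp.
// On eviction: lru_list.begin()->second names the least recently used key.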
Some people also combine MRU with LRU; once the design principle is clear, that variant is easy to understand as well.
Since most cache implementations use the singleton pattern, a Singlton base class is built here with C++ templates, so that any class that inherits from it gets singleton behavior. The code is as follows:
//
//  SingltonT.h
//
#ifndef SingltonT_h
#define SingltonT_h

#include <iostream>
#include <tr1/memory>

using namespace std;
using namespace std::tr1;

template <typename T>
class Singlton {
public:
    static T* instance();

    void print() {
        cout << "haha" << endl;
    }

    ~Singlton() {
        cout << "destruct singlton" << endl;
    }

protected:
    Singlton();

    static std::tr1::shared_ptr<T> s_instance;
};

template <typename T>
std::tr1::shared_ptr<T> Singlton<T>::s_instance;

template <typename T>
Singlton<T>::Singlton() {
    cout << "construct singlton" << endl;
}

template <typename T>
T* Singlton<T>::instance() {
    if (!s_instance.get())
        s_instance.reset(new T);
    return s_instance.get();
}

#endif
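To make a class a singleton, it only has to inherit from Singlton<T> and hand its non-public constructor to the base template. A minimal sketch, using a made-up subclass called ConfigStore (the DZCache class later in this article follows the same pattern):
// Hypothetical subclass -- illustration only, not part of the article's code.
#include "SingltonT.h"

class ConfigStore : public Singlton<ConfigStore> {
    friend class Singlton<ConfigStore>;   // lets instance() call the private constructor
private:
    ConfigStore() : version_(0) {}        // hidden: the object can only be created via instance()
    int version_;
public:
    void setVersion(int v) { version_ = v; }
    int  version() const   { return version_; }
};

// Usage: every call returns the same lazily created object.
//   ConfigStore::instance()->setVersion(3);
//   int v = ConfigStore::instance()->version();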
Since multiple threads may operate on the static singleton object at the same time, concurrent access needs to be synchronized, so a read-write lock is used here to synchronize set (writing data). It looks like this:
#ifndef _RWLOCK_H_
#define _RWLOCK_H_

#define LOCK(q) while (__sync_lock_test_and_set(&(q)->lock,1)) {}
#define UNLOCK(q) __sync_lock_release(&(q)->lock);

struct rwlock {
    int write;
    int read;
};

static inline void
rwlock_init(struct rwlock *lock) {
    lock->write = 0;
    lock->read = 0;
}

static inline void
rwlock_rlock(struct rwlock *lock) {
    for (;;) { // keep looping until the read counter is successfully incremented
        while (lock->write) {
            __sync_synchronize();
        }
        __sync_add_and_fetch(&lock->read, 1);
        if (lock->write) { // a writer slipped in: undo the read count and retry
            __sync_sub_and_fetch(&lock->read, 1);
        } else {
            break;
        }
    }
}

static inline void
rwlock_wlock(struct rwlock *lock) {
    // spin until we own the write flag, otherwise two writers could both proceed
    while (__sync_lock_test_and_set(&lock->write, 1)) {}
    while (lock->read) {
        // http://blog.itmem.com/?m=201204
        // http://gcc.gnu.org/onlinedocs/gcc-4.6.2/gcc/Atomic-Builtins.html
        __sync_synchronize(); // important: without this barrier, a binary built with g++ -O3 can deadlock
    }
}

static inline void
rwlock_wunlock(struct rwlock *lock) {
    __sync_lock_release(&lock->write);
}

static inline void
rwlock_runlock(struct rwlock *lock) {
    __sync_sub_and_fetch(&lock->read, 1);
}

#endif
Instead of building the lock on pthread_mutex_t, this uses the __sync_fetch_and_add family of GCC atomic builtins. Whether it really is 7-8x faster than pthread_mutex_t, as the author of the linked post claims, I have not measured; interested readers are welcome to benchmark it.
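As a quick illustration of the rwlock API (this snippet is mine, not from the original article): readers bracket lookups with rwlock_rlock/rwlock_runlock, writers bracket updates with rwlock_wlock/rwlock_wunlock, and several readers may hold the lock at once while a writer is exclusive:
// Hypothetical usage sketch for rwlock.h -- illustration only.
#include <pthread.h>
#include <stdio.h>
#include "rwlock.h"

static struct rwlock g_lock;
static int g_value = 0;

static void* reader_thread(void*) {
    rwlock_rlock(&g_lock);            // shared: many readers may enter together
    printf("read %d\n", g_value);
    rwlock_runlock(&g_lock);
    return NULL;
}

static void* writer_thread(void*) {
    rwlock_wlock(&g_lock);            // exclusive: waits for all readers to drain
    g_value++;
    rwlock_wunlock(&g_lock);
    return NULL;
}

// Call rwlock_init(&g_lock) once, then spawn the threads with pthread_create.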
With these two pieces in place, I added the key comparator that the original author mentioned, and introduced id so that Objective-C objects can be cached. The final code looks like this:
#ifndef _MAP_LRU_CACHE_H_
#define _MAP_LRU_CACHE_H_

#include <string.h>
#include <stdio.h>
#include <sys/malloc.h>
#include <iostream>
#include <map>
#include <string>
#include <tr1/memory>             // tr1::shared_ptr
#import <Foundation/Foundation.h> // NSString, id (header is Objective-C++)
#include "rwlock.h"

using namespace std;
using namespace std::tr1;

namespace lru_cache {

static const int DEF_CAPACITY = 100000; // default maximum number of cached records

typedef unsigned long long virtual_time;

typedef struct _HashKey
{
    NSString* key;
} HashKey;

typedef struct _HashValue
{
    id value_;
    virtual_time access_;
} HashValue;

// key comparator; the specialization below handles HashKey
template <class key_t>
struct hashkey_compare {
    bool operator()(key_t x, key_t y) const {
        return x < y;
    }
};

template <>
struct hashkey_compare<HashKey>
{
    bool operator()(HashKey __x, HashKey __y) const {
        string x = [__x.key UTF8String];
        string y = [__y.key UTF8String];
        return x < y;
    }
};

// custom map type
template <typename K, typename V, typename _Compare = hashkey_compare<K>,
          typename _Alloc = std::allocator<std::pair<const K, V> > >
class lru_map : public map<K, V, _Compare, _Alloc> {};

class CLRUCache
{
public:
    CLRUCache() : _now(0) {
        _lru_list = shared_ptr<lru_map<virtual_time, HashKey> >(new lru_map<virtual_time, HashKey>);
        _hash_table = shared_ptr<lru_map<HashKey, HashValue> >(new lru_map<HashKey, HashValue>);
    }

    ~CLRUCache() {
        _lru_list->clear();
        _hash_table->clear();
    }

    int set( const HashKey& key, const id &value )
    {
        HashValue hash_value;
        hash_value.value_ = value;
        hash_value.access_ = get_virtual_time();
        pair< lru_map<HashKey, HashValue>::iterator, bool > ret = _hash_table->insert(make_pair(key, hash_value));
        if ( !ret.second ) {
            // key already exists: refresh its position in the LRU list
            virtual_time old_access = (*_hash_table)[key].access_;
            lru_map<virtual_time, HashKey>::iterator iter = _lru_list->find(old_access);
            if (iter != _lru_list->end())
            {
                _lru_list->erase(iter);
            }
            _lru_list->insert(make_pair(hash_value.access_, key));
            (*_hash_table)[key] = hash_value;
        }
        else {
            _lru_list->insert(make_pair(hash_value.access_, key));
            if ( _hash_table->size() > DEF_CAPACITY )
            {
                // get the least recently used key ...
                lru_map<virtual_time, HashKey>::iterator iter = _lru_list->begin();
                _hash_table->erase( iter->second );
                // ... and remove it from the LRU list
                _lru_list->erase(iter);
            }
        }
        return 0;
    }

    HashValue* get( const HashKey& key )
    {
        lru_map<HashKey, HashValue>::iterator iter = _hash_table->find(key);
        if ( iter != _hash_table->end() )
        {
            virtual_time old_access = iter->second.access_;
            iter->second.access_ = get_virtual_time();
            // move this key to the most recently used position in the LRU list
            lru_map<virtual_time, HashKey>::iterator it = _lru_list->find(old_access);
            if (it != _lru_list->end()) {
                _lru_list->erase(it);
            }
            _lru_list->insert(make_pair(iter->second.access_, key));
            return &(iter->second);
        }
        else {
            return NULL;
        }
    }

    unsigned get_lru_list_size() { return (unsigned)_lru_list->size(); }
    unsigned get_hash_table_size() { return (unsigned)_hash_table->size(); }
    virtual_time get_now() { return _now; }

private:
    virtual_time get_virtual_time()
    {
        return ++_now;
    }

    shared_ptr<lru_map<virtual_time, HashKey> > _lru_list;
    shared_ptr<lru_map<HashKey, HashValue> > _hash_table;
    virtual_time _now;
};

} // namespace lru_cache

#endif
Next, let's look at how the singleton and the rwlock are combined to build the final cache:
#ifndef _DZ_CACHE_H_   // illustrative guard name; the original excerpt only showed the closing #endif
#define _DZ_CACHE_H_

#include "SingltonT.h"
#include "rwlock.h"
// the LRU cache header defined above must be included here as well

using namespace lru_cache;

class DZCache : public Singlton<DZCache>
{
    friend class Singlton<DZCache>;

private:
    shared_ptr<CLRUCache> clu_cache;
    rwlock *lock;

    DZCache() {
        lock = (rwlock*) malloc(sizeof(rwlock));
        rwlock_init(lock);
        clu_cache = shared_ptr<CLRUCache>(new CLRUCache());
        cout << "construct JobList" << endl;
    }

    DZCache * Instance() {
        return s_instance.get();
    }

public:
    ~DZCache() {
        free(lock);
    }

    static DZCache& getInstance() {
        return *instance();
    }

    void set(NSString* key, id value) {
        // take the write lock
        rwlock_wlock(lock);
        HashKey hash_key;
        hash_key.key = key;
        clu_cache->set(hash_key, value);
        rwlock_wunlock(lock);
    }

    id get(NSString* key) {
        HashKey hash_key;
        hash_key.key = key;
        HashValue* value = clu_cache->get(hash_key);
        if (value == NULL) {
            return nil;
        }
        else {
            return value->value_;
        }
    }
};

#endif
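One detail worth keeping in mind: CLRUCache::get() also mutates shared state (it bumps the access time and reorders the LRU list), so if lookups can race with other lookups or with set(), they need the same exclusion. A minimal sketch of such a variant as an extra DZCache member, reusing the rwlock above (the method name get_locked is mine, not the original author's):
// Hypothetical addition to DZCache -- not in the original article.
// Because the underlying get() reorders the LRU list, it counts as a write
// from the lock's point of view, so the write lock is taken here.
id get_locked(NSString* key) {
    rwlock_wlock(lock);
    HashKey hash_key;
    hash_key.key = key;
    HashValue* value = clu_cache->get(hash_key);
    id result = (value == NULL) ? nil : value->value_;   // copy out while still holding the lock
    rwlock_wunlock(lock);
    return result;
}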
Finally, here is how it is used:
void testLRUCache() {
    // pointer-style access
    DZCache::instance()->set(@"name", @"daizhj");                    // set
    NSString* name = (NSString*)DZCache::instance()->get(@"name");   // get
    std::cout << [name UTF8String] << endl;

    NSNumber* age = [NSNumber numberWithInt:123123];
    DZCache::instance()->set(@"age", age);
    age = (NSNumber*)DZCache::instance()->get(@"age");

    // reference-style access
    DZCache::getInstance().set(@"name", @"daizhenjun");
    name = (NSString*)DZCache::getInstance().get(@"name");
    std::cout << [name UTF8String] << endl;

    age = [NSNumber numberWithInt:123456];
    DZCache::getInstance().set(@"age", age);
    age = (NSNumber*)DZCache::getInstance().get(@"age");
}
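One practical note: because these headers mix C++ and Objective-C types (NSString*, id, NSNumber), any source file that includes them has to be compiled as Objective-C++, i.e. given a .mm extension (or have its file type set to Objective-C++ source in Xcode).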
That's all for today.