drbd_receiver.rar

  • Uploader: PUDN user
  • Category: Unix_Linux / development tools
  • File size: 36KB
  • File format: rar
  • Favorites: 0
  • Download cost: 1 point
  • Downloads: 1
  • Upload date: 2014-09-30 09:35
drbd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
drbd_receiver.rar contents:
  • drbd_receiver.c (145.9KB)
Content introduction
/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_vli.h"

struct packet_info {
	enum drbd_packet cmd;
	unsigned int size;
	unsigned int vnr;
	void *data;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
				       unsigned int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	unsigned int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
					   struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop to examine the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
			      bool retry)
{
	struct page *page = NULL;
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	int mxb;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;
	rcu_read_unlock();

	if (atomic_read(&mdev->pp_in_use) < mxb)
		page = __drbd_alloc_pages(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (page == NULL)
		return;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}
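All of the helpers above rely on one convention: a set of pages is kept as a singly linked chain threaded through the otherwise unused page->private field, with 0 marking the end of the chain (this is what page_chain_next() and the set_page_private(page, 0) calls depend on). The fragment below is a minimal standalone sketch of that idiom, not part of drbd_receiver.c; chain_push(), chain_length() and chain_release() are hypothetical names introduced only for illustration, while set_page_private(), page_private(), alloc_page() and put_page() are the real kernel APIs the file itself uses.

/* Illustrative sketch only (not DRBD code): build, walk and release a page
 * chain linked through page->private, the same representation used by
 * page_chain_del()/page_chain_tail()/page_chain_add() above. */
#include <linux/gfp.h>
#include <linux/mm.h>

/* Prepend one freshly allocated page to *head; returns it, or NULL on failure. */
static struct page *chain_push(struct page **head)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return NULL;
	set_page_private(page, (unsigned long)*head);	/* new->next = old head */
	*head = page;
	return page;
}

/* Count the pages in a chain by following the "private" next pointers. */
static unsigned int chain_length(struct page *page)
{
	unsigned int n = 0;

	while (page) {
		n++;
		page = (struct page *)page_private(page);	/* 0 terminates the chain */
	}
	return n;
}

/* Release a whole chain, mirroring what page_chain_free() does with put_page(). */
static void chain_release(struct page *page)
{
	while (page) {
		struct page *next = (struct page *)page_private(page);

		put_page(page);
		page = next;
	}
}

drbd_alloc_pages() and drbd_free_pages() add the real bookkeeping on top of this representation: the global drbd_pp_pool chain guarded by drbd_pp_lock, the pp_in_use accounting checked against the max_buffers setting, and the drbd_pp_wait waitqueue that lets allocators sleep until another context returns pages to the pool.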