Showing error 1086

User: Jiri Slaby
Error type: Leaving function in locked state
Error type description: A lock is not released on every exit path of a function, so the function can return with the lock still held (the lock is leaked); see the sketch below.
File location: fs/cifs/file.c
Line in file: 333
Project: Linux Kernel
Project version: 2.6.28
Tools: Undetermined 1
Entered: 2012-03-04 17:07:06 UTC
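
Illustrative sketch (not taken from the kernel source reproduced below): the defect class is a function that acquires a lock and then reaches a return on some path without releasing it. The user-space C fragment below uses POSIX pthread mutexes and a made-up lookup table to show a buggy variant and a fixed variant of the pattern.

        /*
         * Minimal user-space sketch of "leaving function in locked state".
         * Illustrative only -- not the CIFS code below, which deals with
         * GlobalSMBSeslock, a kernel rwlock.  The early return on the
         * error path leaves table_lock held, so every later caller that
         * tries to take it blocks forever.
         */
        #include <pthread.h>
        #include <stddef.h>

        static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
        static int table[16];

        /* Buggy: returns with table_lock still held when idx is out of range. */
        int lookup_buggy(size_t idx, int *out)
        {
                pthread_mutex_lock(&table_lock);
                if (idx >= 16)
                        return -1;              /* lock leaks out of the function */
                *out = table[idx];
                pthread_mutex_unlock(&table_lock);
                return 0;
        }

        /* Fixed: every exit path releases the lock via a single unlock site. */
        int lookup_fixed(size_t idx, int *out)
        {
                int rc = 0;

                pthread_mutex_lock(&table_lock);
                if (idx >= 16) {
                        rc = -1;
                        goto out;
                }
                *out = table[idx];
        out:
                pthread_mutex_unlock(&table_lock);
                return rc;
        }

For reference, the lock involved in the listing below is GlobalSMBSeslock: cifs_open() takes it with write_lock() at source line 297, it is released either inside cifs_open_inode_helper() (source line 118) or in the else branch at source line 306, and the flagged statement is the return at source line 333.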


Source:

   1/*
   2 *   fs/cifs/file.c
   3 *
   4 *   vfs operations that deal with files
   5 *
   6 *   Copyright (C) International Business Machines  Corp., 2002,2007
   7 *   Author(s): Steve French (sfrench@us.ibm.com)
   8 *              Jeremy Allison (jra@samba.org)
   9 *
  10 *   This library is free software; you can redistribute it and/or modify
  11 *   it under the terms of the GNU Lesser General Public License as published
  12 *   by the Free Software Foundation; either version 2.1 of the License, or
  13 *   (at your option) any later version.
  14 *
  15 *   This library is distributed in the hope that it will be useful,
  16 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
  18 *   the GNU Lesser General Public License for more details.
  19 *
  20 *   You should have received a copy of the GNU Lesser General Public License
  21 *   along with this library; if not, write to the Free Software
  22 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  23 */
  24#include <linux/fs.h>
  25#include <linux/backing-dev.h>
  26#include <linux/stat.h>
  27#include <linux/fcntl.h>
  28#include <linux/pagemap.h>
  29#include <linux/pagevec.h>
  30#include <linux/writeback.h>
  31#include <linux/task_io_accounting_ops.h>
  32#include <linux/delay.h>
  33#include <asm/div64.h>
  34#include "cifsfs.h"
  35#include "cifspdu.h"
  36#include "cifsglob.h"
  37#include "cifsproto.h"
  38#include "cifs_unicode.h"
  39#include "cifs_debug.h"
  40#include "cifs_fs_sb.h"
  41
  42static inline struct cifsFileInfo *cifs_init_private(
  43        struct cifsFileInfo *private_data, struct inode *inode,
  44        struct file *file, __u16 netfid)
  45{
  46        memset(private_data, 0, sizeof(struct cifsFileInfo));
  47        private_data->netfid = netfid;
  48        private_data->pid = current->tgid;
  49        init_MUTEX(&private_data->fh_sem);
  50        mutex_init(&private_data->lock_mutex);
  51        INIT_LIST_HEAD(&private_data->llist);
  52        private_data->pfile = file; /* needed for writepage */
  53        private_data->pInode = inode;
  54        private_data->invalidHandle = false;
  55        private_data->closePend = false;
  56        /* we have to track num writers to the inode, since writepages
  57        does not tell us which handle the write is for so there can
  58        be a close (overlapping with write) of the filehandle that
  59        cifs_writepages chose to use */
  60        atomic_set(&private_data->wrtPending, 0);
  61
  62        return private_data;
  63}
  64
  65static inline int cifs_convert_flags(unsigned int flags)
  66{
  67        if ((flags & O_ACCMODE) == O_RDONLY)
  68                return GENERIC_READ;
  69        else if ((flags & O_ACCMODE) == O_WRONLY)
  70                return GENERIC_WRITE;
  71        else if ((flags & O_ACCMODE) == O_RDWR) {
  72                /* GENERIC_ALL is too much permission to request
  73                   can cause unnecessary access denied on create */
  74                /* return GENERIC_ALL; */
  75                return (GENERIC_READ | GENERIC_WRITE);
  76        }
  77
  78        return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
  79                FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
  80                FILE_READ_DATA);
  81
  82
  83}
  84
  85static inline int cifs_get_disposition(unsigned int flags)
  86{
  87        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
  88                return FILE_CREATE;
  89        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
  90                return FILE_OVERWRITE_IF;
  91        else if ((flags & O_CREAT) == O_CREAT)
  92                return FILE_OPEN_IF;
  93        else if ((flags & O_TRUNC) == O_TRUNC)
  94                return FILE_OVERWRITE;
  95        else
  96                return FILE_OPEN;
  97}
  98
  99/* all arguments to this function must be checked for validity in caller */
 100static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
 101        struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
 102        struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
 103        char *full_path, int xid)
 104{
 105        struct timespec temp;
 106        int rc;
 107
 108        /* want handles we can use to read with first
 109           in the list so we do not have to walk the
 110           list to search for one in write_begin */
 111        if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
 112                list_add_tail(&pCifsFile->flist,
 113                              &pCifsInode->openFileList);
 114        } else {
 115                list_add(&pCifsFile->flist,
 116                         &pCifsInode->openFileList);
 117        }
 118        write_unlock(&GlobalSMBSeslock);
 119        if (pCifsInode->clientCanCacheRead) {
 120                /* we have the inode open somewhere else
 121                   no need to discard cache data */
 122                goto client_can_cache;
 123        }
 124
 125        /* BB need same check in cifs_create too? */
 126        /* if not oplocked, invalidate inode pages if mtime or file
 127           size changed */
 128        temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
 129        if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
 130                           (file->f_path.dentry->d_inode->i_size ==
 131                            (loff_t)le64_to_cpu(buf->EndOfFile))) {
 132                cFYI(1, ("inode unchanged on server"));
 133        } else {
 134                if (file->f_path.dentry->d_inode->i_mapping) {
 135                /* BB no need to lock inode until after invalidate
 136                   since namei code should already have it locked? */
 137                        rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
 138                        if (rc != 0)
 139                                CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
 140                }
 141                cFYI(1, ("invalidating remote inode since open detected it "
 142                         "changed"));
 143                invalidate_remote_inode(file->f_path.dentry->d_inode);
 144        }
 145
 146client_can_cache:
 147        if (pTcon->unix_ext)
 148                rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
 149                        full_path, inode->i_sb, xid);
 150        else
 151                rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
 152                        full_path, buf, inode->i_sb, xid, NULL);
 153
 154        if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
 155                pCifsInode->clientCanCacheAll = true;
 156                pCifsInode->clientCanCacheRead = true;
 157                cFYI(1, ("Exclusive Oplock granted on inode %p",
 158                         file->f_path.dentry->d_inode));
 159        } else if ((*oplock & 0xF) == OPLOCK_READ)
 160                pCifsInode->clientCanCacheRead = true;
 161
 162        return rc;
 163}
 164
 165int cifs_open(struct inode *inode, struct file *file)
 166{
 167        int rc = -EACCES;
 168        int xid, oplock;
 169        struct cifs_sb_info *cifs_sb;
 170        struct cifsTconInfo *pTcon;
 171        struct cifsFileInfo *pCifsFile;
 172        struct cifsInodeInfo *pCifsInode;
 173        struct list_head *tmp;
 174        char *full_path = NULL;
 175        int desiredAccess;
 176        int disposition;
 177        __u16 netfid;
 178        FILE_ALL_INFO *buf = NULL;
 179
 180        xid = GetXid();
 181
 182        cifs_sb = CIFS_SB(inode->i_sb);
 183        pTcon = cifs_sb->tcon;
 184
 185        if (file->f_flags & O_CREAT) {
 186                /* search inode for this file and fill in file->private_data */
 187                pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
 188                read_lock(&GlobalSMBSeslock);
 189                list_for_each(tmp, &pCifsInode->openFileList) {
 190                        pCifsFile = list_entry(tmp, struct cifsFileInfo,
 191                                               flist);
 192                        if ((pCifsFile->pfile == NULL) &&
 193                            (pCifsFile->pid == current->tgid)) {
 194                                /* mode set in cifs_create */
 195
 196                                /* needed for writepage */
 197                                pCifsFile->pfile = file;
 198
 199                                file->private_data = pCifsFile;
 200                                break;
 201                        }
 202                }
 203                read_unlock(&GlobalSMBSeslock);
 204                if (file->private_data != NULL) {
 205                        rc = 0;
 206                        FreeXid(xid);
 207                        return rc;
 208                } else {
 209                        if (file->f_flags & O_EXCL)
 210                                cERROR(1, ("could not find file instance for "
 211                                           "new file %p", file));
 212                }
 213        }
 214
 215        full_path = build_path_from_dentry(file->f_path.dentry);
 216        if (full_path == NULL) {
 217                FreeXid(xid);
 218                return -ENOMEM;
 219        }
 220
 221        cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
 222                 inode, file->f_flags, full_path));
 223        desiredAccess = cifs_convert_flags(file->f_flags);
 224
 225/*********************************************************************
 226 *  open flag mapping table:
 227 *
 228 *        POSIX Flag            CIFS Disposition
 229 *        ----------            ----------------
 230 *        O_CREAT               FILE_OPEN_IF
 231 *        O_CREAT | O_EXCL      FILE_CREATE
 232 *        O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 233 *        O_TRUNC               FILE_OVERWRITE
 234 *        none of the above     FILE_OPEN
 235 *
 236 *        Note that there is not a direct match between disposition
 237 *        FILE_SUPERSEDE (ie create whether or not file exists although
 238 *        O_CREAT | O_TRUNC is similar but truncates the existing
 239 *        file rather than creating a new file as FILE_SUPERSEDE does
 240 *        (which uses the attributes / metadata passed in on open call)
 241 *?
 242 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 243 *?  and the read write flags match reasonably.  O_LARGEFILE
 244 *?  is irrelevant because largefile support is always used
 245 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 246 *         O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 247 *********************************************************************/
 248
 249        disposition = cifs_get_disposition(file->f_flags);
 250
 251        if (oplockEnabled)
 252                oplock = REQ_OPLOCK;
 253        else
 254                oplock = 0;
 255
 256        /* BB pass O_SYNC flag through on file attributes .. BB */
 257
 258        /* Also refresh inode by passing in file_info buf returned by SMBOpen
 259           and calling get_inode_info with returned buf (at least helps
 260           non-Unix server case) */
 261
 262        /* BB we can not do this if this is the second open of a file
 263           and the first handle has writebehind data, we might be
 264           able to simply do a filemap_fdatawrite/filemap_fdatawait first */
 265        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
 266        if (!buf) {
 267                rc = -ENOMEM;
 268                goto out;
 269        }
 270
 271        if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
 272                rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
 273                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
 274                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
 275                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
 276        else
 277                rc = -EIO; /* no NT SMB support fall into legacy open below */
 278
 279        if (rc == -EIO) {
 280                /* Old server, try legacy style OpenX */
 281                rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
 282                        desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
 283                        cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
 284                                & CIFS_MOUNT_MAP_SPECIAL_CHR);
 285        }
 286        if (rc) {
 287                cFYI(1, ("cifs_open returned 0x%x", rc));
 288                goto out;
 289        }
 290        file->private_data =
 291                kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
 292        if (file->private_data == NULL) {
 293                rc = -ENOMEM;
 294                goto out;
 295        }
 296        pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
 297        write_lock(&GlobalSMBSeslock);
 298        list_add(&pCifsFile->tlist, &pTcon->openFileList);
 299
 300        pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
 301        if (pCifsInode) {
 302                rc = cifs_open_inode_helper(inode, file, pCifsInode,
 303                                            pCifsFile, pTcon,
 304                                            &oplock, buf, full_path, xid);
 305        } else {
 306                write_unlock(&GlobalSMBSeslock);
 307        }
 308
 309        if (oplock & CIFS_CREATE_ACTION) {
 310                /* time to set mode which we can not set earlier due to
 311                   problems creating new read-only files */
 312                if (pTcon->unix_ext) {
 313                        struct cifs_unix_set_info_args args = {
 314                                .mode        = inode->i_mode,
 315                                .uid        = NO_CHANGE_64,
 316                                .gid        = NO_CHANGE_64,
 317                                .ctime        = NO_CHANGE_64,
 318                                .atime        = NO_CHANGE_64,
 319                                .mtime        = NO_CHANGE_64,
 320                                .device        = 0,
 321                        };
 322                        CIFSSMBUnixSetInfo(xid, pTcon, full_path, &args,
 323                                            cifs_sb->local_nls,
 324                                            cifs_sb->mnt_cifs_flags &
 325                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
 326                }
 327        }
 328
 329out:
 330        kfree(buf);
 331        kfree(full_path);
 332        FreeXid(xid);
 333        return rc;
 334}
 335
 336/* Try to reacquire byte range locks that were released when session */
 337/* to server was lost */
 338static int cifs_relock_file(struct cifsFileInfo *cifsFile)
 339{
 340        int rc = 0;
 341
 342/* BB list all locks open on this file and relock */
 343
 344        return rc;
 345}
 346
 347static int cifs_reopen_file(struct file *file, bool can_flush)
 348{
 349        int rc = -EACCES;
 350        int xid, oplock;
 351        struct cifs_sb_info *cifs_sb;
 352        struct cifsTconInfo *pTcon;
 353        struct cifsFileInfo *pCifsFile;
 354        struct cifsInodeInfo *pCifsInode;
 355        struct inode *inode;
 356        char *full_path = NULL;
 357        int desiredAccess;
 358        int disposition = FILE_OPEN;
 359        __u16 netfid;
 360
 361        if (file->private_data)
 362                pCifsFile = (struct cifsFileInfo *)file->private_data;
 363        else
 364                return -EBADF;
 365
 366        xid = GetXid();
 367        down(&pCifsFile->fh_sem);
 368        if (!pCifsFile->invalidHandle) {
 369                up(&pCifsFile->fh_sem);
 370                FreeXid(xid);
 371                return 0;
 372        }
 373
 374        if (file->f_path.dentry == NULL) {
 375                cERROR(1, ("no valid name if dentry freed"));
 376                dump_stack();
 377                rc = -EBADF;
 378                goto reopen_error_exit;
 379        }
 380
 381        inode = file->f_path.dentry->d_inode;
 382        if (inode == NULL) {
 383                cERROR(1, ("inode not valid"));
 384                dump_stack();
 385                rc = -EBADF;
 386                goto reopen_error_exit;
 387        }
 388
 389        cifs_sb = CIFS_SB(inode->i_sb);
 390        pTcon = cifs_sb->tcon;
 391
 392/* can not grab rename sem here because various ops, including
 393   those that already have the rename sem can end up causing writepage
 394   to get called and if the server was down that means we end up here,
 395   and we can never tell if the caller already has the rename_sem */
 396        full_path = build_path_from_dentry(file->f_path.dentry);
 397        if (full_path == NULL) {
 398                rc = -ENOMEM;
 399reopen_error_exit:
 400                up(&pCifsFile->fh_sem);
 401                FreeXid(xid);
 402                return rc;
 403        }
 404
 405        cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
 406                 inode, file->f_flags, full_path));
 407        desiredAccess = cifs_convert_flags(file->f_flags);
 408
 409        if (oplockEnabled)
 410                oplock = REQ_OPLOCK;
 411        else
 412                oplock = 0;
 413
 414        /* Can not refresh inode by passing in file_info buf to be returned
 415           by SMBOpen and then calling get_inode_info with returned buf
 416           since file might have write behind data that needs to be flushed
 417           and server version of file size can be stale. If we knew for sure
 418           that inode was not dirty locally we could do this */
 419
 420        rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
 421                         CREATE_NOT_DIR, &netfid, &oplock, NULL,
 422                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
 423                                CIFS_MOUNT_MAP_SPECIAL_CHR);
 424        if (rc) {
 425                up(&pCifsFile->fh_sem);
 426                cFYI(1, ("cifs_open returned 0x%x", rc));
 427                cFYI(1, ("oplock: %d", oplock));
 428        } else {
 429                pCifsFile->netfid = netfid;
 430                pCifsFile->invalidHandle = false;
 431                up(&pCifsFile->fh_sem);
 432                pCifsInode = CIFS_I(inode);
 433                if (pCifsInode) {
 434                        if (can_flush) {
 435                                rc = filemap_write_and_wait(inode->i_mapping);
 436                                if (rc != 0)
 437                                        CIFS_I(inode)->write_behind_rc = rc;
 438                        /* temporarily disable caching while we
 439                           go to server to get inode info */
 440                                pCifsInode->clientCanCacheAll = false;
 441                                pCifsInode->clientCanCacheRead = false;
 442                                if (pTcon->unix_ext)
 443                                        rc = cifs_get_inode_info_unix(&inode,
 444                                                full_path, inode->i_sb, xid);
 445                                else
 446                                        rc = cifs_get_inode_info(&inode,
 447                                                full_path, NULL, inode->i_sb,
 448                                                xid, NULL);
 449                        } /* else we are writing out data to server already
 450                             and could deadlock if we tried to flush data, and
 451                             since we do not know if we have data that would
 452                             invalidate the current end of file on the server
 453                             we can not go to the server to get the new inod
 454                             info */
 455                        if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
 456                                pCifsInode->clientCanCacheAll = true;
 457                                pCifsInode->clientCanCacheRead = true;
 458                                cFYI(1, ("Exclusive Oplock granted on inode %p",
 459                                         file->f_path.dentry->d_inode));
 460                        } else if ((oplock & 0xF) == OPLOCK_READ) {
 461                                pCifsInode->clientCanCacheRead = true;
 462                                pCifsInode->clientCanCacheAll = false;
 463                        } else {
 464                                pCifsInode->clientCanCacheRead = false;
 465                                pCifsInode->clientCanCacheAll = false;
 466                        }
 467                        cifs_relock_file(pCifsFile);
 468                }
 469        }
 470
 471        kfree(full_path);
 472        FreeXid(xid);
 473        return rc;
 474}
 475
 476int cifs_close(struct inode *inode, struct file *file)
 477{
 478        int rc = 0;
 479        int xid, timeout;
 480        struct cifs_sb_info *cifs_sb;
 481        struct cifsTconInfo *pTcon;
 482        struct cifsFileInfo *pSMBFile =
 483                (struct cifsFileInfo *)file->private_data;
 484
 485        xid = GetXid();
 486
 487        cifs_sb = CIFS_SB(inode->i_sb);
 488        pTcon = cifs_sb->tcon;
 489        if (pSMBFile) {
 490                struct cifsLockInfo *li, *tmp;
 491                write_lock(&GlobalSMBSeslock);
 492                pSMBFile->closePend = true;
 493                if (pTcon) {
 494                        /* no sense reconnecting to close a file that is
 495                           already closed */
 496                        if (!pTcon->need_reconnect) {
 497                                write_unlock(&GlobalSMBSeslock);
 498                                timeout = 2;
 499                                while ((atomic_read(&pSMBFile->wrtPending) != 0)
 500                                        && (timeout <= 2048)) {
 501                                        /* Give write a better chance to get to
 502                                        server ahead of the close.  We do not
 503                                        want to add a wait_q here as it would
 504                                        increase the memory utilization as
 505                                        the struct would be in each open file,
 506                                        but this should give enough time to
 507                                        clear the socket */
 508                                        cFYI(DBG2,
 509                                                ("close delay, write pending"));
 510                                        msleep(timeout);
 511                                        timeout *= 4;
 512                                }
 513                                if (atomic_read(&pSMBFile->wrtPending))
 514                                        cERROR(1, ("close with pending write"));
 515                                if (!pTcon->need_reconnect &&
 516                                    !pSMBFile->invalidHandle)
 517                                        rc = CIFSSMBClose(xid, pTcon,
 518                                                  pSMBFile->netfid);
 519                        } else
 520                                write_unlock(&GlobalSMBSeslock);
 521                } else
 522                        write_unlock(&GlobalSMBSeslock);
 523
 524                /* Delete any outstanding lock records.
 525                   We'll lose them when the file is closed anyway. */
 526                mutex_lock(&pSMBFile->lock_mutex);
 527                list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
 528                        list_del(&li->llist);
 529                        kfree(li);
 530                }
 531                mutex_unlock(&pSMBFile->lock_mutex);
 532
 533                write_lock(&GlobalSMBSeslock);
 534                list_del(&pSMBFile->flist);
 535                list_del(&pSMBFile->tlist);
 536                write_unlock(&GlobalSMBSeslock);
 537                timeout = 10;
 538                /* We waited above to give the SMBWrite a chance to issue
 539                   on the wire (so we do not get SMBWrite returning EBADF
 540                   if writepages is racing with close.  Note that writepages
 541                   does not specify a file handle, so it is possible for a file
 542                   to be opened twice, and the application close the "wrong"
 543                   file handle - in these cases we delay long enough to allow
 544                   the SMBWrite to get on the wire before the SMB Close.
 545                   We allow total wait here over 45 seconds, more than
 546                   oplock break time, and more than enough to allow any write
 547                   to complete on the server, or to time out on the client */
 548                while ((atomic_read(&pSMBFile->wrtPending) != 0)
 549                                && (timeout <= 50000)) {
 550                        cERROR(1, ("writes pending, delay free of handle"));
 551                        msleep(timeout);
 552                        timeout *= 8;
 553                }
 554                kfree(file->private_data);
 555                file->private_data = NULL;
 556        } else
 557                rc = -EBADF;
 558
 559        read_lock(&GlobalSMBSeslock);
 560        if (list_empty(&(CIFS_I(inode)->openFileList))) {
 561                cFYI(1, ("closing last open instance for inode %p", inode));
 562                /* if the file is not open we do not know if we can cache info
 563                   on this inode, much less write behind and read ahead */
 564                CIFS_I(inode)->clientCanCacheRead = false;
 565                CIFS_I(inode)->clientCanCacheAll  = false;
 566        }
 567        read_unlock(&GlobalSMBSeslock);
 568        if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
 569                rc = CIFS_I(inode)->write_behind_rc;
 570        FreeXid(xid);
 571        return rc;
 572}
 573
 574int cifs_closedir(struct inode *inode, struct file *file)
 575{
 576        int rc = 0;
 577        int xid;
 578        struct cifsFileInfo *pCFileStruct =
 579            (struct cifsFileInfo *)file->private_data;
 580        char *ptmp;
 581
 582        cFYI(1, ("Closedir inode = 0x%p", inode));
 583
 584        xid = GetXid();
 585
 586        if (pCFileStruct) {
 587                struct cifsTconInfo *pTcon;
 588                struct cifs_sb_info *cifs_sb =
 589                        CIFS_SB(file->f_path.dentry->d_sb);
 590
 591                pTcon = cifs_sb->tcon;
 592
 593                cFYI(1, ("Freeing private data in close dir"));
 594                write_lock(&GlobalSMBSeslock);
 595                if (!pCFileStruct->srch_inf.endOfSearch &&
 596                    !pCFileStruct->invalidHandle) {
 597                        pCFileStruct->invalidHandle = true;
 598                        write_unlock(&GlobalSMBSeslock);
 599                        rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
 600                        cFYI(1, ("Closing uncompleted readdir with rc %d",
 601                                 rc));
 602                        /* not much we can do if it fails anyway, ignore rc */
 603                        rc = 0;
 604                } else
 605                        write_unlock(&GlobalSMBSeslock);
 606                ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
 607                if (ptmp) {
 608                        cFYI(1, ("closedir free smb buf in srch struct"));
 609                        pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
 610                        if (pCFileStruct->srch_inf.smallBuf)
 611                                cifs_small_buf_release(ptmp);
 612                        else
 613                                cifs_buf_release(ptmp);
 614                }
 615                kfree(file->private_data);
 616                file->private_data = NULL;
 617        }
 618        /* BB can we lock the filestruct while this is going on? */
 619        FreeXid(xid);
 620        return rc;
 621}
 622
 623static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
 624                                __u64 offset, __u8 lockType)
 625{
 626        struct cifsLockInfo *li =
 627                kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
 628        if (li == NULL)
 629                return -ENOMEM;
 630        li->offset = offset;
 631        li->length = len;
 632        li->type = lockType;
 633        mutex_lock(&fid->lock_mutex);
 634        list_add(&li->llist, &fid->llist);
 635        mutex_unlock(&fid->lock_mutex);
 636        return 0;
 637}
 638
 639int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
 640{
 641        int rc, xid;
 642        __u32 numLock = 0;
 643        __u32 numUnlock = 0;
 644        __u64 length;
 645        bool wait_flag = false;
 646        struct cifs_sb_info *cifs_sb;
 647        struct cifsTconInfo *pTcon;
 648        __u16 netfid;
 649        __u8 lockType = LOCKING_ANDX_LARGE_FILES;
 650        bool posix_locking;
 651
 652        length = 1 + pfLock->fl_end - pfLock->fl_start;
 653        rc = -EACCES;
 654        xid = GetXid();
 655
 656        cFYI(1, ("Lock parm: 0x%x flockflags: "
 657                 "0x%x flocktype: 0x%x start: %lld end: %lld",
 658                cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
 659                pfLock->fl_end));
 660
 661        if (pfLock->fl_flags & FL_POSIX)
 662                cFYI(1, ("Posix"));
 663        if (pfLock->fl_flags & FL_FLOCK)
 664                cFYI(1, ("Flock"));
 665        if (pfLock->fl_flags & FL_SLEEP) {
 666                cFYI(1, ("Blocking lock"));
 667                wait_flag = true;
 668        }
 669        if (pfLock->fl_flags & FL_ACCESS)
 670                cFYI(1, ("Process suspended by mandatory locking - "
 671                         "not implemented yet"));
 672        if (pfLock->fl_flags & FL_LEASE)
 673                cFYI(1, ("Lease on file - not implemented yet"));
 674        if (pfLock->fl_flags &
 675            (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
 676                cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
 677
 678        if (pfLock->fl_type == F_WRLCK) {
 679                cFYI(1, ("F_WRLCK "));
 680                numLock = 1;
 681        } else if (pfLock->fl_type == F_UNLCK) {
 682                cFYI(1, ("F_UNLCK"));
 683                numUnlock = 1;
 684                /* Check if unlock includes more than
 685                one lock range */
 686        } else if (pfLock->fl_type == F_RDLCK) {
 687                cFYI(1, ("F_RDLCK"));
 688                lockType |= LOCKING_ANDX_SHARED_LOCK;
 689                numLock = 1;
 690        } else if (pfLock->fl_type == F_EXLCK) {
 691                cFYI(1, ("F_EXLCK"));
 692                numLock = 1;
 693        } else if (pfLock->fl_type == F_SHLCK) {
 694                cFYI(1, ("F_SHLCK"));
 695                lockType |= LOCKING_ANDX_SHARED_LOCK;
 696                numLock = 1;
 697        } else
 698                cFYI(1, ("Unknown type of lock"));
 699
 700        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 701        pTcon = cifs_sb->tcon;
 702
 703        if (file->private_data == NULL) {
 704                FreeXid(xid);
 705                return -EBADF;
 706        }
 707        netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
 708
 709        posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
 710                        (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
 711
 712        /* BB add code here to normalize offset and length to
 713        account for negative length which we can not accept over the
 714        wire */
 715        if (IS_GETLK(cmd)) {
 716                if (posix_locking) {
 717                        int posix_lock_type;
 718                        if (lockType & LOCKING_ANDX_SHARED_LOCK)
 719                                posix_lock_type = CIFS_RDLCK;
 720                        else
 721                                posix_lock_type = CIFS_WRLCK;
 722                        rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
 723                                        length,        pfLock,
 724                                        posix_lock_type, wait_flag);
 725                        FreeXid(xid);
 726                        return rc;
 727                }
 728
 729                /* BB we could chain these into one lock request BB */
 730                rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
 731                                 0, 1, lockType, 0 /* wait flag */ );
 732                if (rc == 0) {
 733                        rc = CIFSSMBLock(xid, pTcon, netfid, length,
 734                                         pfLock->fl_start, 1 /* numUnlock */ ,
 735                                         0 /* numLock */ , lockType,
 736                                         0 /* wait flag */ );
 737                        pfLock->fl_type = F_UNLCK;
 738                        if (rc != 0)
 739                                cERROR(1, ("Error unlocking previously locked "
 740                                           "range %d during test of lock", rc));
 741                        rc = 0;
 742
 743                } else {
 744                        /* if rc == ERR_SHARING_VIOLATION ? */
 745                        rc = 0;        /* do not change lock type to unlock
 746                                   since range in use */
 747                }
 748
 749                FreeXid(xid);
 750                return rc;
 751        }
 752
 753        if (!numLock && !numUnlock) {
 754                /* if no lock or unlock then nothing
 755                to do since we do not know what it is */
 756                FreeXid(xid);
 757                return -EOPNOTSUPP;
 758        }
 759
 760        if (posix_locking) {
 761                int posix_lock_type;
 762                if (lockType & LOCKING_ANDX_SHARED_LOCK)
 763                        posix_lock_type = CIFS_RDLCK;
 764                else
 765                        posix_lock_type = CIFS_WRLCK;
 766
 767                if (numUnlock == 1)
 768                        posix_lock_type = CIFS_UNLCK;
 769
 770                rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
 771                                      length, pfLock,
 772                                      posix_lock_type, wait_flag);
 773        } else {
 774                struct cifsFileInfo *fid =
 775                        (struct cifsFileInfo *)file->private_data;
 776
 777                if (numLock) {
 778                        rc = CIFSSMBLock(xid, pTcon, netfid, length,
 779                                        pfLock->fl_start,
 780                                        0, numLock, lockType, wait_flag);
 781
 782                        if (rc == 0) {
 783                                /* For Windows locks we must store them. */
 784                                rc = store_file_lock(fid, length,
 785                                                pfLock->fl_start, lockType);
 786                        }
 787                } else if (numUnlock) {
 788                        /* For each stored lock that this unlock overlaps
 789                           completely, unlock it. */
 790                        int stored_rc = 0;
 791                        struct cifsLockInfo *li, *tmp;
 792
 793                        rc = 0;
 794                        mutex_lock(&fid->lock_mutex);
 795                        list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
 796                                if (pfLock->fl_start <= li->offset &&
 797                                                (pfLock->fl_start + length) >=
 798                                                (li->offset + li->length)) {
 799                                        stored_rc = CIFSSMBLock(xid, pTcon,
 800                                                        netfid,
 801                                                        li->length, li->offset,
 802                                                        1, 0, li->type, false);
 803                                        if (stored_rc)
 804                                                rc = stored_rc;
 805
 806                                        list_del(&li->llist);
 807                                        kfree(li);
 808                                }
 809                        }
 810                        mutex_unlock(&fid->lock_mutex);
 811                }
 812        }
 813
 814        if (pfLock->fl_flags & FL_POSIX)
 815                posix_lock_file_wait(file, pfLock);
 816        FreeXid(xid);
 817        return rc;
 818}
 819
 820ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 821        size_t write_size, loff_t *poffset)
 822{
 823        int rc = 0;
 824        unsigned int bytes_written = 0;
 825        unsigned int total_written;
 826        struct cifs_sb_info *cifs_sb;
 827        struct cifsTconInfo *pTcon;
 828        int xid, long_op;
 829        struct cifsFileInfo *open_file;
 830
 831        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 832
 833        pTcon = cifs_sb->tcon;
 834
 835        /* cFYI(1,
 836           (" write %d bytes to offset %lld of %s", write_size,
 837           *poffset, file->f_path.dentry->d_name.name)); */
 838
 839        if (file->private_data == NULL)
 840                return -EBADF;
 841        open_file = (struct cifsFileInfo *) file->private_data;
 842
 843        rc = generic_write_checks(file, poffset, &write_size, 0);
 844        if (rc)
 845                return rc;
 846
 847        xid = GetXid();
 848
 849        if (*poffset > file->f_path.dentry->d_inode->i_size)
 850                long_op = CIFS_VLONG_OP; /* writes past EOF take long time */
 851        else
 852                long_op = CIFS_LONG_OP;
 853
 854        for (total_written = 0; write_size > total_written;
 855             total_written += bytes_written) {
 856                rc = -EAGAIN;
 857                while (rc == -EAGAIN) {
 858                        if (file->private_data == NULL) {
 859                                /* file has been closed on us */
 860                                FreeXid(xid);
 861                        /* if we have gotten here we have written some data
 862                           and blocked, and the file has been freed on us while
 863                           we blocked so return what we managed to write */
 864                                return total_written;
 865                        }
 866                        if (open_file->closePend) {
 867                                FreeXid(xid);
 868                                if (total_written)
 869                                        return total_written;
 870                                else
 871                                        return -EBADF;
 872                        }
 873                        if (open_file->invalidHandle) {
 874                                /* we could deadlock if we called
 875                                   filemap_fdatawait from here so tell
 876                                   reopen_file not to flush data to server
 877                                   now */
 878                                rc = cifs_reopen_file(file, false);
 879                                if (rc != 0)
 880                                        break;
 881                        }
 882
 883                        rc = CIFSSMBWrite(xid, pTcon,
 884                                open_file->netfid,
 885                                min_t(const int, cifs_sb->wsize,
 886                                      write_size - total_written),
 887                                *poffset, &bytes_written,
 888                                NULL, write_data + total_written, long_op);
 889                }
 890                if (rc || (bytes_written == 0)) {
 891                        if (total_written)
 892                                break;
 893                        else {
 894                                FreeXid(xid);
 895                                return rc;
 896                        }
 897                } else
 898                        *poffset += bytes_written;
 899                long_op = CIFS_STD_OP; /* subsequent writes fast -
 900                                    15 seconds is plenty */
 901        }
 902
 903        cifs_stats_bytes_written(pTcon, total_written);
 904
 905        /* since the write may have blocked check these pointers again */
 906        if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
 907                struct inode *inode = file->f_path.dentry->d_inode;
 908/* Do not update local mtime - server will set its actual value on write
 909 *                inode->i_ctime = inode->i_mtime =
 910 *                         current_fs_time(inode->i_sb);*/
 911                if (total_written > 0) {
 912                        spin_lock(&inode->i_lock);
 913                        if (*poffset > file->f_path.dentry->d_inode->i_size)
 914                                i_size_write(file->f_path.dentry->d_inode,
 915                                        *poffset);
 916                        spin_unlock(&inode->i_lock);
 917                }
 918                mark_inode_dirty_sync(file->f_path.dentry->d_inode);
 919        }
 920        FreeXid(xid);
 921        return total_written;
 922}
 923
 924static ssize_t cifs_write(struct file *file, const char *write_data,
 925                          size_t write_size, loff_t *poffset)
 926{
 927        int rc = 0;
 928        unsigned int bytes_written = 0;
 929        unsigned int total_written;
 930        struct cifs_sb_info *cifs_sb;
 931        struct cifsTconInfo *pTcon;
 932        int xid, long_op;
 933        struct cifsFileInfo *open_file;
 934
 935        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 936
 937        pTcon = cifs_sb->tcon;
 938
 939        cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
 940           *poffset, file->f_path.dentry->d_name.name));
 941
 942        if (file->private_data == NULL)
 943                return -EBADF;
 944        open_file = (struct cifsFileInfo *)file->private_data;
 945
 946        xid = GetXid();
 947
 948        if (*poffset > file->f_path.dentry->d_inode->i_size)
 949                long_op = CIFS_VLONG_OP; /* writes past EOF can be slow */
 950        else
 951                long_op = CIFS_LONG_OP;
 952
 953        for (total_written = 0; write_size > total_written;
 954             total_written += bytes_written) {
 955                rc = -EAGAIN;
 956                while (rc == -EAGAIN) {
 957                        if (file->private_data == NULL) {
 958                                /* file has been closed on us */
 959                                FreeXid(xid);
 960                        /* if we have gotten here we have written some data
 961                           and blocked, and the file has been freed on us
 962                           while we blocked so return what we managed to
 963                           write */
 964                                return total_written;
 965                        }
 966                        if (open_file->closePend) {
 967                                FreeXid(xid);
 968                                if (total_written)
 969                                        return total_written;
 970                                else
 971                                        return -EBADF;
 972                        }
 973                        if (open_file->invalidHandle) {
 974                                /* we could deadlock if we called
 975                                   filemap_fdatawait from here so tell
 976                                   reopen_file not to flush data to
 977                                   server now */
 978                                rc = cifs_reopen_file(file, false);
 979                                if (rc != 0)
 980                                        break;
 981                        }
 982                        if (experimEnabled || (pTcon->ses->server &&
 983                                ((pTcon->ses->server->secMode &
 984                                (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
 985                                == 0))) {
 986                                struct kvec iov[2];
 987                                unsigned int len;
 988
 989                                len = min((size_t)cifs_sb->wsize,
 990                                          write_size - total_written);
 991                                /* iov[0] is reserved for smb header */
 992                                iov[1].iov_base = (char *)write_data +
 993                                                  total_written;
 994                                iov[1].iov_len = len;
 995                                rc = CIFSSMBWrite2(xid, pTcon,
 996                                                open_file->netfid, len,
 997                                                *poffset, &bytes_written,
 998                                                iov, 1, long_op);
 999                        } else
1000                                rc = CIFSSMBWrite(xid, pTcon,
1001                                         open_file->netfid,
1002                                         min_t(const int, cifs_sb->wsize,
1003                                               write_size - total_written),
1004                                         *poffset, &bytes_written,
1005                                         write_data + total_written,
1006                                         NULL, long_op);
1007                }
1008                if (rc || (bytes_written == 0)) {
1009                        if (total_written)
1010                                break;
1011                        else {
1012                                FreeXid(xid);
1013                                return rc;
1014                        }
1015                } else
1016                        *poffset += bytes_written;
1017                long_op = CIFS_STD_OP; /* subsequent writes fast -
1018                                    15 seconds is plenty */
1019        }
1020
1021        cifs_stats_bytes_written(pTcon, total_written);
1022
1023        /* since the write may have blocked check these pointers again */
1024        if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1025/*BB We could make this contingent on superblock ATIME flag too */
1026/*                file->f_path.dentry->d_inode->i_ctime =
1027                file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1028                if (total_written > 0) {
1029                        spin_lock(&file->f_path.dentry->d_inode->i_lock);
1030                        if (*poffset > file->f_path.dentry->d_inode->i_size)
1031                                i_size_write(file->f_path.dentry->d_inode,
1032                                             *poffset);
1033                        spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1034                }
1035                mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1036        }
1037        FreeXid(xid);
1038        return total_written;
1039}
1040
1041#ifdef CONFIG_CIFS_EXPERIMENTAL
1042struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
1043{
1044        struct cifsFileInfo *open_file = NULL;
1045
1046        read_lock(&GlobalSMBSeslock);
1047        /* we could simply get the first_list_entry since write-only entries
1048           are always at the end of the list but since the first entry might
1049           have a close pending, we go through the whole list */
1050        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1051                if (open_file->closePend)
1052                        continue;
1053                if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
1054                    (open_file->pfile->f_flags & O_RDONLY))) {
1055                        if (!open_file->invalidHandle) {
1056                                /* found a good file */
1057                                /* lock it so it will not be closed on us */
1058                                atomic_inc(&open_file->wrtPending);
1059                                read_unlock(&GlobalSMBSeslock);
1060                                return open_file;
1061                        } /* else might as well continue, and look for
1062                             another, or simply have the caller reopen it
1063                             again rather than trying to fix this handle */
1064                } else /* write only file */
1065                        break; /* write only files are last so must be done */
1066        }
1067        read_unlock(&GlobalSMBSeslock);
1068        return NULL;
1069}
1070#endif
1071
1072struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
1073{
1074        struct cifsFileInfo *open_file;
1075        bool any_available = false;
1076        int rc;
1077
1078        /* Having a null inode here (because mapping->host was set to zero by
1079        the VFS or MM) should not happen but we had reports of on oops (due to
1080        it being zero) during stress testcases so we need to check for it */
1081
1082        if (cifs_inode == NULL) {
1083                cERROR(1, ("Null inode passed to cifs_writeable_file"));
1084                dump_stack();
1085                return NULL;
1086        }
1087
1088        read_lock(&GlobalSMBSeslock);
1089refind_writable:
1090        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1091                if (open_file->closePend ||
1092                    (!any_available && open_file->pid != current->tgid))
1093                        continue;
1094
1095                if (open_file->pfile &&
1096                    ((open_file->pfile->f_flags & O_RDWR) ||
1097                     (open_file->pfile->f_flags & O_WRONLY))) {
1098                        atomic_inc(&open_file->wrtPending);
1099
1100                        if (!open_file->invalidHandle) {
1101                                /* found a good writable file */
1102                                read_unlock(&GlobalSMBSeslock);
1103                                return open_file;
1104                        }
1105
1106                        read_unlock(&GlobalSMBSeslock);
1107                        /* Had to unlock since following call can block */
1108                        rc = cifs_reopen_file(open_file->pfile, false);
1109                        if (!rc) {
1110                                if (!open_file->closePend)
1111                                        return open_file;
1112                                else { /* start over in case this was deleted */
1113                                       /* since the list could be modified */
1114                                        read_lock(&GlobalSMBSeslock);
1115                                        atomic_dec(&open_file->wrtPending);
1116                                        goto refind_writable;
1117                                }
1118                        }
1119
1120                        /* if it fails, try another handle if possible -
1121                        (we can not do this if closePending since
1122                        loop could be modified - in which case we
1123                        have to start at the beginning of the list
1124                        again. Note that it would be bad
1125                        to hold up writepages here (rather than
1126                        in caller) with continuous retries */
1127                        cFYI(1, ("wp failed on reopen file"));
1128                        read_lock(&GlobalSMBSeslock);
1129                        /* can not use this handle, no write
1130                           pending on this one after all */
1131                        atomic_dec(&open_file->wrtPending);
1132
1133                        if (open_file->closePend) /* list could have changed */
1134                                goto refind_writable;
1135                        /* else we simply continue to the next entry. Thus
1136                           we do not loop on reopen errors.  If we
1137                           can not reopen the file, for example if we
1138                           reconnected to a server with another client
1139                           racing to delete or lock the file we would not
1140                           make progress if we restarted before the beginning
1141                           of the loop here. */
1142                }
1143        }
1144        /* couldn't find useable FH with same pid, try any available */
1145        if (!any_available) {
1146                any_available = true;
1147                goto refind_writable;
1148        }
1149        read_unlock(&GlobalSMBSeslock);
1150        return NULL;
1151}
1152
1153static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1154{
1155        struct address_space *mapping = page->mapping;
1156        loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1157        char *write_data;
1158        int rc = -EFAULT;
1159        int bytes_written = 0;
1160        struct cifs_sb_info *cifs_sb;
1161        struct cifsTconInfo *pTcon;
1162        struct inode *inode;
1163        struct cifsFileInfo *open_file;
1164
1165        if (!mapping || !mapping->host)
1166                return -EFAULT;
1167
1168        inode = page->mapping->host;
1169        cifs_sb = CIFS_SB(inode->i_sb);
1170        pTcon = cifs_sb->tcon;
1171
1172        offset += (loff_t)from;
1173        write_data = kmap(page);
1174        write_data += from;
1175
1176        if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1177                kunmap(page);
1178                return -EIO;
1179        }
1180
1181        /* racing with truncate? */
1182        if (offset > mapping->host->i_size) {
1183                kunmap(page);
1184                return 0; /* don't care */
1185        }
1186
1187        /* check to make sure that we are not extending the file */
1188        if (mapping->host->i_size - offset < (loff_t)to)
1189                to = (unsigned)(mapping->host->i_size - offset);
1190
1191        open_file = find_writable_file(CIFS_I(mapping->host));
1192        if (open_file) {
1193                bytes_written = cifs_write(open_file->pfile, write_data,
1194                                           to-from, &offset);
1195                atomic_dec(&open_file->wrtPending);
1196                /* Does mm or vfs already set times? */
1197                inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1198                if ((bytes_written > 0) && (offset))
1199                        rc = 0;
1200                else if (bytes_written < 0)
1201                        rc = bytes_written;
1202        } else {
1203                cFYI(1, ("No writeable filehandles for inode"));
1204                rc = -EIO;
1205        }
1206
1207        kunmap(page);
1208        return rc;
1209}
1210
1211static int cifs_writepages(struct address_space *mapping,
1212                           struct writeback_control *wbc)
1213{
1214        struct backing_dev_info *bdi = mapping->backing_dev_info;
1215        unsigned int bytes_to_write;
1216        unsigned int bytes_written;
1217        struct cifs_sb_info *cifs_sb;
1218        int done = 0;
1219        pgoff_t end;
1220        pgoff_t index;
1221        int range_whole = 0;
1222        struct kvec *iov;
1223        int len;
1224        int n_iov = 0;
1225        pgoff_t next;
1226        int nr_pages;
1227        __u64 offset = 0;
1228        struct cifsFileInfo *open_file;
1229        struct page *page;
1230        struct pagevec pvec;
1231        int rc = 0;
1232        int scanned = 0;
1233        int xid;
1234
1235        cifs_sb = CIFS_SB(mapping->host->i_sb);
1236
1237        /*
1238         * If wsize is smaller than the page cache size, default to writing
1239         * one page at a time via cifs_writepage
1240         */
1241        if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1242                return generic_writepages(mapping, wbc);
1243
1244        if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1245                if (cifs_sb->tcon->ses->server->secMode &
1246                                (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1247                        if (!experimEnabled)
1248                                return generic_writepages(mapping, wbc);
1249
1250        iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1251        if (iov == NULL)
1252                return generic_writepages(mapping, wbc);
1253
1254
1255        /*
1256         * BB: Is this meaningful for a non-block-device file system?
1257         * If it is, we should test it again after we do I/O
1258         */
1259        if (wbc->nonblocking && bdi_write_congested(bdi)) {
1260                wbc->encountered_congestion = 1;
1261                kfree(iov);
1262                return 0;
1263        }
1264
1265        xid = GetXid();
1266
1267        pagevec_init(&pvec, 0);
1268        if (wbc->range_cyclic) {
1269                index = mapping->writeback_index; /* Start from prev offset */
1270                end = -1;
1271        } else {
1272                index = wbc->range_start >> PAGE_CACHE_SHIFT;
1273                end = wbc->range_end >> PAGE_CACHE_SHIFT;
1274                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1275                        range_whole = 1;
1276                scanned = 1;
1277        }
1278retry:
1279        while (!done && (index <= end) &&
1280               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1281                        PAGECACHE_TAG_DIRTY,
1282                        min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1283                int first;
1284                unsigned int i;
1285
1286                first = -1;
1287                next = 0;
1288                n_iov = 0;
1289                bytes_to_write = 0;
1290
1291                for (i = 0; i < nr_pages; i++) {
1292                        page = pvec.pages[i];
1293                        /*
1294                         * At this point we hold neither mapping->tree_lock nor
1295                         * lock on the page itself: the page may be truncated or
1296                         * invalidated (changing page->mapping to NULL), or even
1297                         * swizzled back from swapper_space to tmpfs file
1298                         * mapping
1299                         */
1300
1301                        if (first < 0)
1302                                lock_page(page);
1303                        else if (!trylock_page(page))
1304                                break;
1305
1306                        if (unlikely(page->mapping != mapping)) {
1307                                unlock_page(page);
1308                                break;
1309                        }
1310
1311                        if (!wbc->range_cyclic && page->index > end) {
1312                                done = 1;
1313                                unlock_page(page);
1314                                break;
1315                        }
1316
1317                        if (next && (page->index != next)) {
1318                                /* Not next consecutive page */
1319                                unlock_page(page);
1320                                break;
1321                        }
1322
1323                        if (wbc->sync_mode != WB_SYNC_NONE)
1324                                wait_on_page_writeback(page);
1325
1326                        if (PageWriteback(page) ||
1327                                        !clear_page_dirty_for_io(page)) {
1328                                unlock_page(page);
1329                                break;
1330                        }
1331
1332                        /*
1333                         * This actually clears the dirty bit in the radix tree.
1334                         * See cifs_writepage() for more commentary.
1335                         */
1336                        set_page_writeback(page);
1337
1338                        if (page_offset(page) >= mapping->host->i_size) {
1339                                done = 1;
1340                                unlock_page(page);
1341                                end_page_writeback(page);
1342                                break;
1343                        }
1344
1345                        /*
1346                         * BB can we get rid of this?  pages are held by pvec
1347                         */
1348                        page_cache_get(page);
1349
1350                        len = min(mapping->host->i_size - page_offset(page),
1351                                  (loff_t)PAGE_CACHE_SIZE);
1352
1353                        /* reserve iov[0] for the smb header */
1354                        n_iov++;
1355                        iov[n_iov].iov_base = kmap(page);
1356                        iov[n_iov].iov_len = len;
1357                        bytes_to_write += len;
1358
1359                        if (first < 0) {
1360                                first = i;
1361                                offset = page_offset(page);
1362                        }
1363                        next = page->index + 1;
1364                        if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1365                                break;
1366                }
1367                if (n_iov) {
1368                        /* Search for a writable handle every time we call
1369                         * CIFSSMBWrite2.  We can't rely on the last handle
1370                         * we used to still be valid
1371                         */
1372                        open_file = find_writable_file(CIFS_I(mapping->host));
1373                        if (!open_file) {
1374                                cERROR(1, ("No writable handles for inode"));
1375                                rc = -EBADF;
1376                        } else {
1377                                rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1378                                                   open_file->netfid,
1379                                                   bytes_to_write, offset,
1380                                                   &bytes_written, iov, n_iov,
1381                                                   CIFS_LONG_OP);
1382                                atomic_dec(&open_file->wrtPending);
1383                                if (rc || bytes_written < bytes_to_write) {
1384                                        cERROR(1, ("Write2 ret %d, wrote %d",
1385                                                  rc, bytes_written));
1386                                        /* BB what if continued retry is
1387                                           requested via mount flags? */
1388                                        if (rc == -ENOSPC)
1389                                                set_bit(AS_ENOSPC, &mapping->flags);
1390                                        else
1391                                                set_bit(AS_EIO, &mapping->flags);
1392                                } else {
1393                                        cifs_stats_bytes_written(cifs_sb->tcon,
1394                                                                 bytes_written);
1395                                }
1396                        }
1397                        for (i = 0; i < n_iov; i++) {
1398                                page = pvec.pages[first + i];
1399                                /* Should we also set the page error when
1400                                rc is success but too little data was written? */
1401                                /* BB investigate retry logic on temporary
1402                                server crash cases and how recovery works
1403                                when page marked as error */
1404                                if (rc)
1405                                        SetPageError(page);
1406                                kunmap(page);
1407                                unlock_page(page);
1408                                end_page_writeback(page);
1409                                page_cache_release(page);
1410                        }
1411                        if ((wbc->nr_to_write -= n_iov) <= 0)
1412                                done = 1;
1413                        index = next;
1414                } else
1415                        /* Need to re-find the pages we skipped */
1416                        index = pvec.pages[0]->index + 1;
1417
1418                pagevec_release(&pvec);
1419        }
1420        if (!scanned && !done) {
1421                /*
1422                 * We hit the last page and there is more work to be done: wrap
1423                 * back to the start of the file
1424                 */
1425                scanned = 1;
1426                index = 0;
1427                goto retry;
1428        }
1429        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1430                mapping->writeback_index = index;
1431
1432        FreeXid(xid);
1433        kfree(iov);
1434        return rc;
1435}
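
Both cifs_writepages() and cifs_partialpagewrite() balance every successful find_writable_file() with an atomic_dec(&open_file->wrtPending); the decrements above imply the lookup raises the writer count before returning the handle. A hedged sketch of that pairing; example_write_with_handle is a hypothetical helper, not part of fs/cifs/file.c:

        static int example_write_with_handle(struct inode *inode, const char *buf,
                                             unsigned int len, loff_t *poffset)
        {
                struct cifsFileInfo *open_file;
                int bytes_written;

                open_file = find_writable_file(CIFS_I(inode));
                if (!open_file)
                        return -EIO;                    /* no writable handle available */

                bytes_written = cifs_write(open_file->pfile, buf, len, poffset);
                atomic_dec(&open_file->wrtPending);     /* always drop the writer count */

                return bytes_written;
        }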
1436
1437static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1438{
1439        int rc = -EFAULT;
1440        int xid;
1441
1442        xid = GetXid();
1443/* BB add check for wbc flags */
1444        page_cache_get(page);
1445        if (!PageUptodate(page))
1446                cFYI(1, ("ppw - page not up to date"));
1447
1448        /*
1449         * Set the "writeback" flag, and clear "dirty" in the radix tree.
1450         *
1451         * A writepage() implementation always needs to do either this,
1452         * or re-dirty the page with "redirty_page_for_writepage()" in
1453         * the case of a failure.
1454         *
1455         * Just unlocking the page will cause the radix tree tag-bits
1456         * to fail to update with the state of the page correctly.
1457         */
1458        set_page_writeback(page);
1459        rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1460        SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1461        unlock_page(page);
1462        end_page_writeback(page);
1463        page_cache_release(page);
1464        FreeXid(xid);
1465        return rc;
1466}
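
The block comment in cifs_writepage() spells out the general ->writepage obligations. A minimal hedged sketch of that contract for a hypothetical filesystem, with the actual I/O step elided:

        static int example_writepage(struct page *page, struct writeback_control *wbc)
        {
                int rc = 0;

                set_page_writeback(page);       /* clears the dirty tag, sets writeback */
                /* ... issue the I/O for the page here, setting rc on failure ... */
                if (rc)
                        SetPageError(page);     /* record a failure on the page */
                unlock_page(page);
                end_page_writeback(page);       /* must pair with set_page_writeback() */
                return rc;
        }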
1467
1468static int cifs_write_end(struct file *file, struct address_space *mapping,
1469                        loff_t pos, unsigned len, unsigned copied,
1470                        struct page *page, void *fsdata)
1471{
1472        int rc;
1473        struct inode *inode = mapping->host;
1474
1475        cFYI(1, ("write_end for page %p from pos %lld with %d bytes",
1476                 page, pos, copied));
1477
1478        if (PageChecked(page)) {
1479                if (copied == len)
1480                        SetPageUptodate(page);
1481                ClearPageChecked(page);
1482        } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE)
1483                SetPageUptodate(page);
1484
1485        if (!PageUptodate(page)) {
1486                char *page_data;
1487                unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
1488                int xid;
1489
1490                xid = GetXid();
1491                /* this is probably better than directly calling
1492                   partialpage_write since in this function the file handle is
1493                   known which we might as well leverage */
1494                /* BB check if anything else missing out of ppw
1495                   such as updating last write time */
1496                page_data = kmap(page);
1497                rc = cifs_write(file, page_data + offset, copied, &pos);
1498                /* if (rc < 0) should we set writebehind rc? */
1499                kunmap(page);
1500
1501                FreeXid(xid);
1502        } else {
1503                rc = copied;
1504                pos += copied;
1505                set_page_dirty(page);
1506        }
1507
1508        if (rc > 0) {
1509                spin_lock(&inode->i_lock);
1510                if (pos > inode->i_size)
1511                        i_size_write(inode, pos);
1512                spin_unlock(&inode->i_lock);
1513        }
1514
1515        unlock_page(page);
1516        page_cache_release(page);
1517
1518        return rc;
1519}
1520
1521int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1522{
1523        int xid;
1524        int rc = 0;
1525        struct inode *inode = file->f_path.dentry->d_inode;
1526
1527        xid = GetXid();
1528
1529        cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1530                dentry->d_name.name, datasync));
1531
1532        rc = filemap_write_and_wait(inode->i_mapping);
1533        if (rc == 0) {
1534                rc = CIFS_I(inode)->write_behind_rc;
1535                CIFS_I(inode)->write_behind_rc = 0;
1536        }
1537        FreeXid(xid);
1538        return rc;
1539}
1540
1541/* static void cifs_sync_page(struct page *page)
1542{
1543        struct address_space *mapping;
1544        struct inode *inode;
1545        unsigned long index = page->index;
1546        unsigned int rpages = 0;
1547        int rc = 0;
1548
1549        cFYI(1, ("sync page %p",page));
1550        mapping = page->mapping;
1551        if (!mapping)
1552                return 0;
1553        inode = mapping->host;
1554        if (!inode)
1555                return; */
1556
1557/*        fill in rpages then
1558        result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1559
1560/*        cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1561
1562#if 0
1563        if (rc < 0)
1564                return rc;
1565        return 0;
1566#endif
1567} */
1568
1569/*
1570 * As file closes, flush all cached write data for this inode checking
1571 * for write behind errors.
1572 */
1573int cifs_flush(struct file *file, fl_owner_t id)
1574{
1575        struct inode *inode = file->f_path.dentry->d_inode;
1576        int rc = 0;
1577
1578        /* Rather than do the steps manually:
1579           lock the inode for writing
1580           loop through pages looking for write behind data (dirty pages)
1581           coalesce into contiguous 16K (or smaller) chunks to write to server
1582           send to server (prefer in parallel)
1583           deal with writebehind errors
1584           unlock inode for writing
1585           filemap_fdatawrite appears easier for the time being */
1586
1587        rc = filemap_fdatawrite(inode->i_mapping);
1588        /* reset wb rc if we were able to write out dirty pages */
1589        if (!rc) {
1590                rc = CIFS_I(inode)->write_behind_rc;
1591                CIFS_I(inode)->write_behind_rc = 0;
1592        }
1593
1594        cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
1595
1596        return rc;
1597}
1598
1599ssize_t cifs_user_read(struct file *file, char __user *read_data,
1600        size_t read_size, loff_t *poffset)
1601{
1602        int rc = -EACCES;
1603        unsigned int bytes_read = 0;
1604        unsigned int total_read = 0;
1605        unsigned int current_read_size;
1606        struct cifs_sb_info *cifs_sb;
1607        struct cifsTconInfo *pTcon;
1608        int xid;
1609        struct cifsFileInfo *open_file;
1610        char *smb_read_data;
1611        char __user *current_offset;
1612        struct smb_com_read_rsp *pSMBr;
1613
1614        xid = GetXid();
1615        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1616        pTcon = cifs_sb->tcon;
1617
1618        if (file->private_data == NULL) {
1619                FreeXid(xid);
1620                return -EBADF;
1621        }
1622        open_file = (struct cifsFileInfo *)file->private_data;
1623
1624        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1625                cFYI(1, ("attempting read on write only file instance"));
1626
1627        for (total_read = 0, current_offset = read_data;
1628             read_size > total_read;
1629             total_read += bytes_read, current_offset += bytes_read) {
1630                current_read_size = min_t(const int, read_size - total_read,
1631                                          cifs_sb->rsize);
1632                rc = -EAGAIN;
1633                smb_read_data = NULL;
1634                while (rc == -EAGAIN) {
1635                        int buf_type = CIFS_NO_BUFFER;
1636                        if ((open_file->invalidHandle) &&
1637                            (!open_file->closePend)) {
1638                                rc = cifs_reopen_file(file, true);
1639                                if (rc != 0)
1640                                        break;
1641                        }
1642                        rc = CIFSSMBRead(xid, pTcon,
1643                                         open_file->netfid,
1644                                         current_read_size, *poffset,
1645                                         &bytes_read, &smb_read_data,
1646                                         &buf_type);
1647                        pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1648                        if (smb_read_data) {
1649                                if (copy_to_user(current_offset,
1650                                                smb_read_data +
1651                                                4 /* RFC1001 length field */ +
1652                                                le16_to_cpu(pSMBr->DataOffset),
1653                                                bytes_read))
1654                                        rc = -EFAULT;
1655
1656                                if (buf_type == CIFS_SMALL_BUFFER)
1657                                        cifs_small_buf_release(smb_read_data);
1658                                else if (buf_type == CIFS_LARGE_BUFFER)
1659                                        cifs_buf_release(smb_read_data);
1660                                smb_read_data = NULL;
1661                        }
1662                }
1663                if (rc || (bytes_read == 0)) {
1664                        if (total_read) {
1665                                break;
1666                        } else {
1667                                FreeXid(xid);
1668                                return rc;
1669                        }
1670                } else {
1671                        cifs_stats_bytes_read(pTcon, bytes_read);
1672                        *poffset += bytes_read;
1673                }
1674        }
1675        FreeXid(xid);
1676        return total_read;
1677}
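
The release rule for the response buffer, cifs_small_buf_release() for CIFS_SMALL_BUFFER and cifs_buf_release() for CIFS_LARGE_BUFFER, recurs in cifs_user_read() above and again in cifs_readpages() below. A hedged helper capturing it; example_release_read_buf is a hypothetical name, not part of fs/cifs/file.c:

        static void example_release_read_buf(char *buf, int buf_type)
        {
                if (buf == NULL)
                        return;
                if (buf_type == CIFS_SMALL_BUFFER)
                        cifs_small_buf_release(buf);
                else if (buf_type == CIFS_LARGE_BUFFER)
                        cifs_buf_release(buf);
        }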
1678
1679
1680static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1681        loff_t *poffset)
1682{
1683        int rc = -EACCES;
1684        unsigned int bytes_read = 0;
1685        unsigned int total_read;
1686        unsigned int current_read_size;
1687        struct cifs_sb_info *cifs_sb;
1688        struct cifsTconInfo *pTcon;
1689        int xid;
1690        char *current_offset;
1691        struct cifsFileInfo *open_file;
1692        int buf_type = CIFS_NO_BUFFER;
1693
1694        xid = GetXid();
1695        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1696        pTcon = cifs_sb->tcon;
1697
1698        if (file->private_data == NULL) {
1699                FreeXid(xid);
1700                return -EBADF;
1701        }
1702        open_file = (struct cifsFileInfo *)file->private_data;
1703
1704        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1705                cFYI(1, ("attempting read on write only file instance"));
1706
1707        for (total_read = 0, current_offset = read_data;
1708             read_size > total_read;
1709             total_read += bytes_read, current_offset += bytes_read) {
1710                current_read_size = min_t(const int, read_size - total_read,
1711                                          cifs_sb->rsize);
1712                /* For Windows ME and 9x we do not want to request more
1713                than was negotiated, since the server will refuse the read then */
1714                if ((pTcon->ses) &&
1715                        !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1716                        current_read_size = min_t(const int, current_read_size,
1717                                        pTcon->ses->server->maxBuf - 128);
1718                }
1719                rc = -EAGAIN;
1720                while (rc == -EAGAIN) {
1721                        if ((open_file->invalidHandle) &&
1722                            (!open_file->closePend)) {
1723                                rc = cifs_reopen_file(file, true);
1724                                if (rc != 0)
1725                                        break;
1726                        }
1727                        rc = CIFSSMBRead(xid, pTcon,
1728                                         open_file->netfid,
1729                                         current_read_size, *poffset,
1730                                         &bytes_read, &current_offset,
1731                                         &buf_type);
1732                }
1733                if (rc || (bytes_read == 0)) {
1734                        if (total_read) {
1735                                break;
1736                        } else {
1737                                FreeXid(xid);
1738                                return rc;
1739                        }
1740                } else {
1741                        cifs_stats_bytes_read(pTcon, total_read);
1742                        *poffset += bytes_read;
1743                }
1744        }
1745        FreeXid(xid);
1746        return total_read;
1747}
1748
1749int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1750{
1751        struct dentry *dentry = file->f_path.dentry;
1752        int rc, xid;
1753
1754        xid = GetXid();
1755        rc = cifs_revalidate(dentry);
1756        if (rc) {
1757                cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1758                FreeXid(xid);
1759                return rc;
1760        }
1761        rc = generic_file_mmap(file, vma);
1762        FreeXid(xid);
1763        return rc;
1764}
1765
1766
1767static void cifs_copy_cache_pages(struct address_space *mapping,
1768        struct list_head *pages, int bytes_read, char *data,
1769        struct pagevec *plru_pvec)
1770{
1771        struct page *page;
1772        char *target;
1773
1774        while (bytes_read > 0) {
1775                if (list_empty(pages))
1776                        break;
1777
1778                page = list_entry(pages->prev, struct page, lru);
1779                list_del(&page->lru);
1780
1781                if (add_to_page_cache(page, mapping, page->index,
1782                                      GFP_KERNEL)) {
1783                        page_cache_release(page);
1784                        cFYI(1, ("Add page cache failed"));
1785                        data += PAGE_CACHE_SIZE;
1786                        bytes_read -= PAGE_CACHE_SIZE;
1787                        continue;
1788                }
1789
1790                target = kmap_atomic(page, KM_USER0);
1791
1792                if (PAGE_CACHE_SIZE > bytes_read) {
1793                        memcpy(target, data, bytes_read);
1794                        /* zero the tail end of this partial page */
1795                        memset(target + bytes_read, 0,
1796                               PAGE_CACHE_SIZE - bytes_read);
1797                        bytes_read = 0;
1798                } else {
1799                        memcpy(target, data, PAGE_CACHE_SIZE);
1800                        bytes_read -= PAGE_CACHE_SIZE;
1801                }
1802                kunmap_atomic(target, KM_USER0);
1803
1804                flush_dcache_page(page);
1805                SetPageUptodate(page);
1806                unlock_page(page);
1807                if (!pagevec_add(plru_pvec, page))
1808                        __pagevec_lru_add_file(plru_pvec);
1809                data += PAGE_CACHE_SIZE;
1810        }
1811        return;
1812}
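
As a worked example of the partial-page handling in cifs_copy_cache_pages(), assuming 4K pages: a 10000-byte read fills the first two pages completely (2 * 4096 = 8192 bytes), copies the remaining 1808 bytes into the third page, and zeroes the last 4096 - 1808 = 2288 bytes of that page.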
1813
1814static int cifs_readpages(struct file *file, struct address_space *mapping,
1815        struct list_head *page_list, unsigned num_pages)
1816{
1817        int rc = -EACCES;
1818        int xid;
1819        loff_t offset;
1820        struct page *page;
1821        struct cifs_sb_info *cifs_sb;
1822        struct cifsTconInfo *pTcon;
1823        unsigned int bytes_read = 0;
1824        unsigned int read_size, i;
1825        char *smb_read_data = NULL;
1826        struct smb_com_read_rsp *pSMBr;
1827        struct pagevec lru_pvec;
1828        struct cifsFileInfo *open_file;
1829        int buf_type = CIFS_NO_BUFFER;
1830
1831        xid = GetXid();
1832        if (file->private_data == NULL) {
1833                FreeXid(xid);
1834                return -EBADF;
1835        }
1836        open_file = (struct cifsFileInfo *)file->private_data;
1837        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1838        pTcon = cifs_sb->tcon;
1839
1840        pagevec_init(&lru_pvec, 0);
1841        cFYI(DBG2, ("rpages: num pages %d", num_pages));
1842        for (i = 0; i < num_pages; ) {
1843                unsigned contig_pages;
1844                struct page *tmp_page;
1845                unsigned long expected_index;
1846
1847                if (list_empty(page_list))
1848                        break;
1849
1850                page = list_entry(page_list->prev, struct page, lru);
1851                offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1852
1853                /* count adjacent pages that we will read into */
1854                contig_pages = 0;
1855                expected_index =
1856                        list_entry(page_list->prev, struct page, lru)->index;
1857                list_for_each_entry_reverse(tmp_page, page_list, lru) {
1858                        if (tmp_page->index == expected_index) {
1859                                contig_pages++;
1860                                expected_index++;
1861                        } else
1862                                break;
1863                }
1864                if (contig_pages + i >  num_pages)
1865                        contig_pages = num_pages - i;
1866
1867                /* for reads over a certain size could initiate async
1868                   read ahead */
1869
1870                read_size = contig_pages * PAGE_CACHE_SIZE;
1871                /* Read size needs to be in multiples of one page */
1872                read_size = min_t(const unsigned int, read_size,
1873                                  cifs_sb->rsize & PAGE_CACHE_MASK);
1874                cFYI(DBG2, ("rpages: read size 0x%x  contiguous pages %d",
1875                                read_size, contig_pages));
1876                rc = -EAGAIN;
1877                while (rc == -EAGAIN) {
1878                        if ((open_file->invalidHandle) &&
1879                            (!open_file->closePend)) {
1880                                rc = cifs_reopen_file(file, true);
1881                                if (rc != 0)
1882                                        break;
1883                        }
1884
1885                        rc = CIFSSMBRead(xid, pTcon,
1886                                         open_file->netfid,
1887                                         read_size, offset,
1888                                         &bytes_read, &smb_read_data,
1889                                         &buf_type);
1890                        /* BB more RC checks ? */
1891                        if (rc == -EAGAIN) {
1892                                if (smb_read_data) {
1893                                        if (buf_type == CIFS_SMALL_BUFFER)
1894                                                cifs_small_buf_release(smb_read_data);
1895                                        else if (buf_type == CIFS_LARGE_BUFFER)
1896                                                cifs_buf_release(smb_read_data);
1897                                        smb_read_data = NULL;
1898                                }
1899                        }
1900                }
1901                if ((rc < 0) || (smb_read_data == NULL)) {
1902                        cFYI(1, ("Read error in readpages: %d", rc));
1903                        break;
1904                } else if (bytes_read > 0) {
1905                        task_io_account_read(bytes_read);
1906                        pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1907                        cifs_copy_cache_pages(mapping, page_list, bytes_read,
1908                                smb_read_data + 4 /* RFC1001 hdr */ +
1909                                le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1910
1911                        i +=  bytes_read >> PAGE_CACHE_SHIFT;
1912                        cifs_stats_bytes_read(pTcon, bytes_read);
1913                        if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1914                                i++; /* account for partial page */
1915
1916                                /* server copy of file can have smaller size
1917                                   than client */
1918                                /* BB do we need to verify this common case ?
1919                                   this case is ok - if we are at server EOF
1920                                   we will hit it on next read */
1921
1922                                /* break; */
1923                        }
1924                } else {
1925                        cFYI(1, ("No bytes read (%d) at offset %lld . "
1926                                 "Cleaning remaining pages from readahead list",
1927                                 bytes_read, offset));
1928                        /* BB turn off caching and do new lookup on
1929                           file size at server? */
1930                        break;
1931                }
1932                if (smb_read_data) {
1933                        if (buf_type == CIFS_SMALL_BUFFER)
1934                                cifs_small_buf_release(smb_read_data);
1935                        else if (buf_type == CIFS_LARGE_BUFFER)
1936                                cifs_buf_release(smb_read_data);
1937                        smb_read_data = NULL;
1938                }
1939                bytes_read = 0;
1940        }
1941
1942        pagevec_lru_add_file(&lru_pvec);
1943
1944/* need to free smb_read_data buf before exit */
1945        if (smb_read_data) {
1946                if (buf_type == CIFS_SMALL_BUFFER)
1947                        cifs_small_buf_release(smb_read_data);
1948                else if (buf_type == CIFS_LARGE_BUFFER)
1949                        cifs_buf_release(smb_read_data);
1950                smb_read_data = NULL;
1951        }
1952
1953        FreeXid(xid);
1954        return rc;
1955}
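
As a worked example of the read sizing in cifs_readpages(), assuming 4K pages and a negotiated rsize of 16K: seven contiguous readahead pages would give read_size = 7 * 4096 = 28672, which is capped to rsize & PAGE_CACHE_MASK = 16384, so each CIFSSMBRead call covers at most four pages and the loop advances i by bytes_read >> PAGE_CACHE_SHIFT.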
1956
1957static int cifs_readpage_worker(struct file *file, struct page *page,
1958        loff_t *poffset)
1959{
1960        char *read_data;
1961        int rc;
1962
1963        page_cache_get(page);
1964        read_data = kmap(page);
1965        /* for reads over a certain size could initiate async read ahead */
1966
1967        rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1968
1969        if (rc < 0)
1970                goto io_error;
1971        else
1972                cFYI(1, ("Bytes read %d", rc));
1973
1974        file->f_path.dentry->d_inode->i_atime =
1975                current_fs_time(file->f_path.dentry->d_inode->i_sb);
1976
1977        if (PAGE_CACHE_SIZE > rc)
1978                memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1979
1980        flush_dcache_page(page);
1981        SetPageUptodate(page);
1982        rc = 0;
1983
1984io_error:
1985        kunmap(page);
1986        page_cache_release(page);
1987        return rc;
1988}
1989
1990static int cifs_readpage(struct file *file, struct page *page)
1991{
1992        loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1993        int rc = -EACCES;
1994        int xid;
1995
1996        xid = GetXid();
1997
1998        if (file->private_data == NULL) {
1999                FreeXid(xid);
2000                return -EBADF;
2001        }
2002
2003        cFYI(1, ("readpage %p at offset %d 0x%x\n",
2004                 page, (int)offset, (int)offset));
2005
2006        rc = cifs_readpage_worker(file, page, &offset);
2007
2008        unlock_page(page);
2009
2010        FreeXid(xid);
2011        return rc;
2012}
2013
2014static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2015{
2016        struct cifsFileInfo *open_file;
2017
2018        read_lock(&GlobalSMBSeslock);
2019        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2020                if (open_file->closePend)
2021                        continue;
2022                if (open_file->pfile &&
2023                    ((open_file->pfile->f_flags & O_RDWR) ||
2024                     (open_file->pfile->f_flags & O_WRONLY))) {
2025                        read_unlock(&GlobalSMBSeslock);
2026                        return 1;
2027                }
2028        }
2029        read_unlock(&GlobalSMBSeslock);
2030        return 0;
2031}
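
is_inode_writable() has to drop GlobalSMBSeslock on both the early-return path and the fall-through path; forgetting one of them is exactly the "leaving function in locked state" defect this report tracks. A hedged sketch of the same scan written with a single unlock site:

        static int example_is_inode_writable(struct cifsInodeInfo *cifs_inode)
        {
                struct cifsFileInfo *open_file;
                int writable = 0;

                read_lock(&GlobalSMBSeslock);
                list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                        if (open_file->closePend)
                                continue;
                        if (open_file->pfile &&
                            (open_file->pfile->f_flags & (O_RDWR | O_WRONLY))) {
                                writable = 1;
                                break;
                        }
                }
                read_unlock(&GlobalSMBSeslock);         /* single unlock covers every path */
                return writable;
        }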
2032
2033/* We do not want to update the file size from server for inodes
2034   open for write - to avoid races with writepage extending
2035   the file - in the future we could consider allowing
2036   refreshing the inode only on increases in the file size
2037   but this is tricky to do without racing with writebehind
2038   page caching in the current Linux kernel design */
2039bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
2040{
2041        if (!cifsInode)
2042                return true;
2043
2044        if (is_inode_writable(cifsInode)) {
2045                /* This inode is open for write at least once */
2046                struct cifs_sb_info *cifs_sb;
2047
2048                cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
2049                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2050                        /* since there is no page cache to corrupt on
2051                        directio we can change the size safely */
2052                        return true;
2053                }
2054
2055                if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
2056                        return true;
2057
2058                return false;
2059        } else
2060                return true;
2061}
2062
2063static int cifs_write_begin(struct file *file, struct address_space *mapping,
2064                        loff_t pos, unsigned len, unsigned flags,
2065                        struct page **pagep, void **fsdata)
2066{
2067        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2068        loff_t offset = pos & (PAGE_CACHE_SIZE - 1);
2069        loff_t page_start = pos & PAGE_MASK;
2070        loff_t i_size;
2071        struct page *page;
2072        int rc = 0;
2073
2074        cFYI(1, ("write_begin from %lld len %d", (long long)pos, len));
2075
2076        page = __grab_cache_page(mapping, index);
2077        if (!page) {
2078                rc = -ENOMEM;
2079                goto out;
2080        }
2081
2082        if (PageUptodate(page))
2083                goto out;
2084
2085        /*
2086         * If we write a full page it will be up to date, no need to read from
2087         * the server. If the write is short, we'll end up doing a sync write
2088         * instead.
2089         */
2090        if (len == PAGE_CACHE_SIZE)
2091                goto out;
2092
2093        /*
2094         * optimize away the read when we have an oplock, and we're not
2095         * expecting to use any of the data we'd be reading in. That
2096         * is, when the page lies beyond the EOF, or straddles the EOF
2097         * and the write will cover all of the existing data.
2098         */
2099        if (CIFS_I(mapping->host)->clientCanCacheRead) {
2100                i_size = i_size_read(mapping->host);
2101                if (page_start >= i_size ||
2102                    (offset == 0 && (pos + len) >= i_size)) {
2103                        zero_user_segments(page, 0, offset,
2104                                           offset + len,
2105                                           PAGE_CACHE_SIZE);
2106                        /*
2107                         * PageChecked means that the parts of the page
2108                         * to which we're not writing are considered up
2109                         * to date. Once the data is copied to the
2110                         * page, it can be set uptodate.
2111                         */
2112                        SetPageChecked(page);
2113                        goto out;
2114                }
2115        }
2116
2117        if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2118                /*
2119                 * might as well read a page, it is fast enough. If we get
2120                 * an error, we don't need to return it. cifs_write_end will
2121                 * do a sync write instead since PG_uptodate isn't set.
2122                 */
2123                cifs_readpage_worker(file, page, &page_start);
2124        } else {
2125                /* we could try using another file handle if there is one -
2126                   but how would we lock it to prevent close of that handle
2127                   racing with this read? In any case
2128                   this will be written out by write_end so is fine */
2129        }
2130out:
2131        *pagep = page;
2132        return rc;
2133}
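
As a worked example of the oplock short-cut in cifs_write_begin(), assuming 4K pages: with i_size = 2048 and a 100-byte write at pos = 8192 (offset 0 within its page), page_start = 8192 >= i_size, so the page lies wholly beyond EOF; zero_user_segments() zeroes the ranges [0, 0) and [100, 4096), PageChecked is set, and no read from the server is needed.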
2134
2135const struct address_space_operations cifs_addr_ops = {
2136        .readpage = cifs_readpage,
2137        .readpages = cifs_readpages,
2138        .writepage = cifs_writepage,
2139        .writepages = cifs_writepages,
2140        .write_begin = cifs_write_begin,
2141        .write_end = cifs_write_end,
2142        .set_page_dirty = __set_page_dirty_nobuffers,
2143        /* .sync_page = cifs_sync_page, */
2144        /* .direct_IO = */
2145};
2146
2147/*
2148 * cifs_readpages requires the server to support a buffer large enough to
2149 * contain the header plus one complete page of data.  Otherwise, we need
2150 * to leave cifs_readpages out of the address space operations.
2151 */
2152const struct address_space_operations cifs_addr_ops_smallbuf = {
2153        .readpage = cifs_readpage,
2154        .writepage = cifs_writepage,
2155        .writepages = cifs_writepages,
2156        .write_begin = cifs_write_begin,
2157        .write_end = cifs_write_end,
2158        .set_page_dirty = __set_page_dirty_nobuffers,
2159        /* .sync_page = cifs_sync_page, */
2160        /* .direct_IO = */
2161};